/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
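
/* An illustrative sketch of these transformations (example code, not
   from the original sources): given

       for (i = 0; i < n; i++)
         a[i] = x * y;

   moving invariants hoists the computation of x * y above the loop,
   and strength reduction replaces the address computation a + i*4 by
   a pointer bumped by 4 on each iteration, roughly

       t = x * y;
       for (p = a; p < a + n; p++)
         *p = t;

   after which the basic induction variable i can often be eliminated
   by rewriting the exit test in terms of p.  */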
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
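
/* For example (hypothetical luids, for illustration only): if the
   loop's start note has luid 100 and its end note has luid 200, a
   register whose last use has luid 250, or whose first use has luid
   90, is "global" by LOOP_REG_GLOBAL_P: it is live outside the loop,
   so its value must be preserved across the loop boundary.  */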
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  unsigned int regno;           /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;
/* Forward declarations.  */

static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
                                            rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
                                      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
                                         struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
                                   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
                                      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
                                         int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
                                           struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
                                     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
                                rtx, rtx, rtx, rtx, rtx *,
                                int, int));
static void check_final_value PARAMS ((const struct loop *,
                                       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
                                rtx, rtx, rtx, rtx, rtx, rtx, int,
                                enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependant_givs PARAMS ((struct iv_class *,
                                              struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
                                        enum machine_mode, rtx, rtx,
                                        rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
                                          rtx *, rtx *, rtx *, int, int *,
                                          enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
                                    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
                                        int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
                                          struct iv_class *, int,
                                          basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
                                        unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
                                        rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
                                              basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));
typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)       \
  (INSN_UID (INSN) < max_uid_for_loop           \
   && INSN_LUID (INSN) >= INSN_LUID (START)     \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
                                                     struct induction *,
                                                     rtx));

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
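
/* For instance (an illustrative sketch): given insn I1, a line-number
   NOTE, and insn I2 in that order, compute_luids assigns I1 some luid
   N, gives the NOTE the same luid N, and assigns I2 luid N + 1, so the
   luid distance from I1 to I2 is the same with or without -g.  */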
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  register rtx insn;
  register int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
                                       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
        scan_loop (loop, flags);
    }

  /* If there were lexical blocks inside the loop, they have been
     replicated.  We will now have more than one NOTE_INSN_BLOCK_BEG
     and NOTE_INSN_BLOCK_END for each such block.  We must duplicate
     the BLOCKs as well.  */
  if (write_symbols != NO_DEBUG)
    reorder_blocks ();

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop->top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
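
/* A sketch of the opportunity described above (hypothetical example):
   in

       while (n--)
         *p = c;

   with p and c both loop-invariant and no potentially-aliasing read of
   *p inside the loop, the store could be performed once instead of on
   every iteration.  */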
static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  register int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
         && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
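
  /* For example (illustrative numbers only): with 10 non-fixed
     registers, threshold is 11 when the loop contains a call and 22
     when it does not.  move_movables below treats a movable as
     desirable when threshold * savings * lifetime is at least
     insn_count (doubled if the register has already been moved
     once).  */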

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (any_uncondjump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
        {
          loop->top = next_label (loop->scan_start);
          loop->scan_start = JUMP_LABEL (p);
        }
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          else if (/* The register is used in basic blocks other
                      than the one where it is set (meaning that
                      something after this point in the loop might
                      depend on its value before the set).  */
                   ! reg_in_basic_block_p (p, SET_DEST (set))
                   /* And the set is not guaranteed to be executed once
                      the loop starts, or the value before the set is
                      needed before the set occurs...

                      ??? Note we have quadratic behaviour here, mitigated
                      by the fact that the previous test will often fail for
                      large loops.  Rather than re-scanning the entire loop
                      each time for register usage, we should build tables
                      of the register usage and use them here instead.  */
                   && (maybe_never
                       || loop_reg_used_before_p (loop, set, p)))
            /* It is unsafe to move the set.

               This code used to consider it OK to move a set of a variable
               which was not created by the user and not used in an exit test.
               That behavior is incorrect and was removed.  */
            ;
          else if ((tem = loop_invariant_p (loop, src))
                   && (dependencies == 0
                       || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
                   && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
                       || (tem1
                           = consec_sets_invariant_p
                           (loop, SET_DEST (set),
                            regs->array[REGNO (SET_DEST (set))].set_in_loop,
                            p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (loop_info->has_call
                  && regs->array[regno].single_usage != 0
                  && regs->array[regno].single_usage != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (regs->array[regno].single_usage))
                  && regs->array[regno].set_in_loop == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           regs->array[regno].single_usage)
                  && no_labels_between_p (p, regs->array[regno].single_usage)
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           regs->array[regno].single_usage))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (regs->array[regno].single_usage)
                    = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  regs->array[regno].set_in_loop = 0;
                  continue;
                }

              m = (struct movable *) xmalloc (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either loop_invariant_p
                 or consec_sets_invariant_p returned 2
                 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = LOOP_REG_GLOBAL_P (loop, regno);
              m->match = 0;
              m->lifetime = LOOP_REG_LIFETIME (loop, regno);
              m->savings = regs->array[regno].n_times_set;
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              regs->array[regno].set_in_loop = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              loop_movables_add (movables, m);

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (regs->array[regno].set_in_loop == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) xmalloc (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || LOOP_REG_GLOBAL_P (loop, regno)
                               || (labels_in_range_p
                                   (p, REGNO_FIRST_LUID (regno))));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
                  m->savings = 1;
                  regs->array[regno].set_in_loop = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  loop_movables_add (movables, m);
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_CALL_P (p))
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
                     && NEXT_INSN (NEXT_INSN (p)) == loop_end
                     && any_uncondjump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (loop, movables, threshold, insn_count);

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
         && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
        /* Ensure our label doesn't go away.  */
        LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
          && --LABEL_NUSES (update_end) == 0)
        delete_insn (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a library
                                   routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do
        insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
        && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        memset (matched_regs, 0, regs->num);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0
              && regs->array[m1->regno].n_times_set == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables, regs))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = REGNO_FIRST_LUID (m->regno);
            int last = REGNO_LAST_LUID (m->regno);

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (REGNO_FIRST_LUID (m1->regno) > last
                       || REGNO_LAST_LUID (m1->regno) < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap:
            ;
          }
    }

  /* Clean up.  */
  free (matched_regs);
}
/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables, regs)
     rtx x, y;
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
      && CONSTANT_P (y))
    {
      for (m = movables->head; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
           && CONSTANT_P (x))
    {
      for (m = movables->head; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
                                      movables, regs) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
              == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  LABEL_NUSES for CODE_LABEL
   references is incremented once for each added note.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
         avoid flow generating (slightly) worse code.

         We no longer ignore such label references (see LABEL_REF handling in
         mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (reg_mentioned_p (XEXP (x, 0), insn))
          {
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
                                                  REG_NOTES (insn));
            if (LABEL_P (XEXP (x, 0)))
              LABEL_NUSES (XEXP (x, 0))++;
          }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (loop, movables, threshold, insn_count)
     struct loop *loop;
     struct loop_movables *movables;
     int threshold;
     int insn_count;
{
  struct loop_regs *regs = LOOP_REGS (loop);
  int nregs = regs->num;
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
  char *already_moved = (char *) xcalloc (nregs, sizeof (char));

  for (m = movables->head; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          if (m->consec > 0)
            fprintf (loop_dump_stream, "consec %d, ", m->consec);
          if (m->cond)
            fprintf (loop_dump_stream, "cond ");
          if (m->force)
            fprintf (loop_dump_stream, "force ");
          if (m->global)
            fprintf (loop_dump_stream, "global ");
          if (m->done)
            fprintf (loop_dump_stream, "done ");
          if (m->move_insn)
            fprintf (loop_dump_stream, "move-insn ");
          if (m->match)
            fprintf (loop_dump_stream, "matches %d ",
                     INSN_UID (m->match->insn));
          if (m->forces)
            fprintf (loop_dump_stream, "forces %d ",
                     INSN_UID (m->forces->insn));
        }

      /* Ignore the insn if it's already done (it matched something else).
         Otherwise, see if it is now safe to move.  */

      if (!m->done
          && (! m->cond
              || (1 == loop_invariant_p (loop, m->set_src)
                  && (m->dependencies == 0
                      || 1 == loop_invariant_p (loop, m->dependencies))
                  && (m->consec == 0
                      || 1 == consec_sets_invariant_p (loop, m->set_dest,
                                                       m->consec + 1,
                                                       m->insn))))
          && (! m->forces || m->forces->done))
        {
          register int regno;
          register rtx p;
          int savings = m->savings;

          /* We have an insn that is safe to move.
             Compute its desirability.  */

          p = m->insn;
          regno = m->regno;

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "savings %d ", savings);

          if (regs->array[regno].moved_once && loop_dump_stream)
            fprintf (loop_dump_stream, "halved since already moved ");

          /* An insn MUST be moved if we already moved something else
             which is safe only if this one is moved too: that is,
             if already_moved[REGNO] is nonzero.  */

          /* An insn is desirable to move if the new lifetime of the
             register is no more than THRESHOLD times the old lifetime.
             If it's not desirable, it means the loop is so big
             that moving won't speed things up much,
             and it is liable to make register usage worse.  */

          /* It is also desirable to move if it can be moved at no
             extra cost because something else was already moved.  */

          if (already_moved[regno]
              || flag_move_all_movables
              || (threshold * savings * m->lifetime) >=
                 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
              || (m->forces && m->forces->done
                  && regs->array[m->forces->regno].n_times_set == 1))
            {
              int count;
              register struct movable *m1;
              rtx first = NULL_RTX;

              /* Now move the insns that set the reg.  */

              if (m->partial && m->match)
                {
                  rtx newpat, i1;
                  rtx r1, r2;
                  /* Find the end of this chain of matching regs.
                     Thus, we load each reg in the chain from that one reg.
                     And that reg is loaded with 0 directly,
                     since it has ->match == 0.  */
                  for (m1 = m; m1->match; m1 = m1->match);
                  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
                                          SET_DEST (PATTERN (m1->insn)));
                  i1 = loop_insn_hoist (loop, newpat);

                  /* Mark the moved, invariant reg as being allowed to
                     share a hard reg with the other matching invariant.  */
                  REG_NOTES (i1) = REG_NOTES (m->insn);
                  r1 = SET_DEST (PATTERN (m->insn));
                  r2 = SET_DEST (PATTERN (m1->insn));
                  regs_may_share
                    = gen_rtx_EXPR_LIST (VOIDmode, r1,
                                         gen_rtx_EXPR_LIST (VOIDmode, r2,
                                                            regs_may_share));
                  delete_insn (m->insn);

                  if (new_start == 0)
                    new_start = i1;

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
                }
              /* If we are to re-generate the item being moved with a
                 new move insn, first delete what we have and then emit
                 the move insn before the loop.  */
              else if (m->move_insn)
                {
                  rtx i1, temp, seq;

                  for (count = m->consec; count >= 0; count--)
                    {
                      /* If this is the first insn of a library call sequence,
                         skip to the end.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
                        p = XEXP (temp, 0);

                      /* If this is the last insn of a libcall sequence, then
                         delete every insn in the sequence except the last.
1788 The last insn is handled in the normal manner. */
1789 if (GET_CODE (p) != NOTE
1790 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1792 temp = XEXP (temp, 0);
1793 while (temp != p)
1794 temp = delete_insn (temp);
1797 temp = p;
1798 p = delete_insn (p);
1800 /* simplify_giv_expr expects that it can walk the insns
1801 at m->insn forwards and see this old sequence we are
1802 tossing here. delete_insn does preserve the next
1803 pointers, but when we skip over a NOTE we must fix
1804 it up. Otherwise that code walks into the non-deleted
1805 insn stream. */
1806 while (p && GET_CODE (p) == NOTE)
1807 p = NEXT_INSN (temp) = NEXT_INSN (p);
1810 start_sequence ();
1811 emit_move_insn (m->set_dest, m->set_src);
1812 temp = get_insns ();
1813 seq = gen_sequence ();
1814 end_sequence ();
1816 add_label_notes (m->set_src, temp);
1818 i1 = loop_insn_hoist (loop, seq);
1819 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1820 REG_NOTES (i1)
1821 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1822 m->set_src, REG_NOTES (i1));
1824 if (loop_dump_stream)
1825 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1827 /* The more regs we move, the less we like moving them. */
1828 threshold -= 3;
1830 else
1832 for (count = m->consec; count >= 0; count--)
1834 rtx i1, temp;
1836 /* If first insn of libcall sequence, skip to end. */
1837 /* Do this at start of loop, since p is guaranteed to
1838 be an insn here. */
1839 if (GET_CODE (p) != NOTE
1840 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1841 p = XEXP (temp, 0);
1843 /* If last insn of libcall sequence, move all
1844 insns except the last before the loop. The last
1845 insn is handled in the normal manner. */
1846 if (GET_CODE (p) != NOTE
1847 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1849 rtx fn_address = 0;
1850 rtx fn_reg = 0;
1851 rtx fn_address_insn = 0;
1853 first = 0;
1854 for (temp = XEXP (temp, 0); temp != p;
1855 temp = NEXT_INSN (temp))
1857 rtx body;
1858 rtx n;
1859 rtx next;
1861 if (GET_CODE (temp) == NOTE)
1862 continue;
1864 body = PATTERN (temp);
1866 /* Find the next insn after TEMP,
1867 not counting USE or NOTE insns. */
1868 for (next = NEXT_INSN (temp); next != p;
1869 next = NEXT_INSN (next))
1870 if (! (GET_CODE (next) == INSN
1871 && GET_CODE (PATTERN (next)) == USE)
1872 && GET_CODE (next) != NOTE)
1873 break;
1875 /* If that is the call, this may be the insn
1876 that loads the function address.
1878 Extract the function address from the insn
1879 that loads it into a register.
1880 If this insn was cse'd, we get incorrect code.
1882 So emit a new move insn that copies the
1883 function address into the register that the
1884 call insn will use. flow.c will delete any
1885 redundant stores that we have created. */
1886 if (GET_CODE (next) == CALL_INSN
1887 && GET_CODE (body) == SET
1888 && GET_CODE (SET_DEST (body)) == REG
1889 && (n = find_reg_note (temp, REG_EQUAL,
1890 NULL_RTX)))
1892 fn_reg = SET_SRC (body);
1893 if (GET_CODE (fn_reg) != REG)
1894 fn_reg = SET_DEST (body);
1895 fn_address = XEXP (n, 0);
1896 fn_address_insn = temp;
1898 /* We have the call insn.
1899 If it uses the register we suspect it might,
1900 load it with the correct address directly. */
1901 if (GET_CODE (temp) == CALL_INSN
1902 && fn_address != 0
1903 && reg_referenced_p (fn_reg, body))
1904 loop_insn_emit_after (loop, 0, fn_address_insn,
1905 gen_move_insn
1906 (fn_reg, fn_address));
1908 if (GET_CODE (temp) == CALL_INSN)
1910 i1 = loop_call_insn_hoist (loop, body);
1911 /* Because the USAGE information potentially
1912 contains objects other than hard registers
1913 we need to copy it. */
1914 if (CALL_INSN_FUNCTION_USAGE (temp))
1915 CALL_INSN_FUNCTION_USAGE (i1)
1916 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1918 else
1919 i1 = loop_insn_hoist (loop, body);
1920 if (first == 0)
1921 first = i1;
1922 if (temp == fn_address_insn)
1923 fn_address_insn = i1;
1924 REG_NOTES (i1) = REG_NOTES (temp);
1925 delete_insn (temp);
1927 if (new_start == 0)
1928 new_start = first;
1930 if (m->savemode != VOIDmode)
1932 /* P sets REG to zero; but we should clear only
1933 the bits that are not covered by the mode
1934 m->savemode. */
1935 rtx reg = m->set_dest;
1936 rtx sequence;
1937 rtx tem;
1939 start_sequence ();
1940 tem = expand_binop
1941 (GET_MODE (reg), and_optab, reg,
1942 GEN_INT ((((HOST_WIDE_INT) 1
1943 << GET_MODE_BITSIZE (m->savemode)))
1944 - 1),
1945 reg, 1, OPTAB_LIB_WIDEN);
1946 if (tem == 0)
1947 abort ();
1948 if (tem != reg)
1949 emit_move_insn (reg, tem);
1950 sequence = gen_sequence ();
1951 end_sequence ();
1952 i1 = loop_insn_hoist (loop, sequence);
1954 else if (GET_CODE (p) == CALL_INSN)
1956 i1 = loop_call_insn_hoist (loop, PATTERN (p));
1957 /* Because the USAGE information potentially
1958 contains objects other than hard registers
1959 we need to copy it. */
1960 if (CALL_INSN_FUNCTION_USAGE (p))
1961 CALL_INSN_FUNCTION_USAGE (i1)
1962 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1964 else if (count == m->consec && m->move_insn_first)
1966 rtx seq;
1967 /* The SET_SRC might not be invariant, so we must
1968 use the REG_EQUAL note. */
1969 start_sequence ();
1970 emit_move_insn (m->set_dest, m->set_src);
1971 temp = get_insns ();
1972 seq = gen_sequence ();
1973 end_sequence ();
1975 add_label_notes (m->set_src, temp);
1977 i1 = loop_insn_hoist (loop, seq);
1978 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1979 REG_NOTES (i1)
1980 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1981 : REG_EQUAL),
1982 m->set_src, REG_NOTES (i1));
1984 else
1985 i1 = loop_insn_hoist (loop, PATTERN (p));
1987 if (REG_NOTES (i1) == 0)
1989 REG_NOTES (i1) = REG_NOTES (p);
1991 /* If there is a REG_EQUAL note present whose value
1992 is not loop invariant, then delete it, since it
1993 may cause problems with later optimization passes.
1994 It is possible for cse to create notes
1995 like this as a result of record_jump_cond. */
1997 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1998 && ! loop_invariant_p (loop, XEXP (temp, 0)))
1999 remove_note (i1, temp);
2002 if (new_start == 0)
2003 new_start = i1;
2005 if (loop_dump_stream)
2006 fprintf (loop_dump_stream, " moved to %d",
2007 INSN_UID (i1));
2009 /* If library call, now fix the REG_NOTES that contain
2010 insn pointers, namely REG_LIBCALL on FIRST
2011 and REG_RETVAL on I1. */
2012 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2014 XEXP (temp, 0) = first;
2015 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2016 XEXP (temp, 0) = i1;
2019 temp = p;
2020 delete_insn (p);
2021 p = NEXT_INSN (p);
2023 /* simplify_giv_expr expects that it can walk the insns
2024 at m->insn forwards and see this old sequence we are
2025 tossing here. delete_insn does preserve the next
2026 pointers, but when we skip over a NOTE we must fix
2027 it up. Otherwise that code walks into the non-deleted
2028 insn stream. */
2029 while (p && GET_CODE (p) == NOTE)
2030 p = NEXT_INSN (temp) = NEXT_INSN (p);
2033 /* The more regs we move, the less we like moving them. */
2034 threshold -= 3;
2037 /* Any other movable that loads the same register
2038 MUST be moved. */
2039 already_moved[regno] = 1;
2041 /* This reg has been moved out of one loop. */
2042 regs->array[regno].moved_once = 1;
2044 /* The reg set here is now invariant. */
2045 if (! m->partial)
2046 regs->array[regno].set_in_loop = 0;
2048 m->done = 1;
2050 /* Change the length-of-life info for the register
2051 to say it lives at least the full length of this loop.
2052 This will help guide optimizations in outer loops. */
2054 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2055 /* This is the old insn before all the moved insns.
2056 We can't use the moved insn because it is out of range
2057 in uid_luid. Only the old insns have luids. */
2058 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2059 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2060 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2062 /* Combine with this moved insn any other matching movables. */
2064 if (! m->partial)
2065 for (m1 = movables->head; m1; m1 = m1->next)
2066 if (m1->match == m)
2068 rtx temp;
2070 /* Schedule the reg loaded by M1
2071 for replacement so that it shares the reg of M.
2072 If the modes differ (only possible in restricted
2073 circumstances), make a SUBREG.
2075 Note this assumes that the target dependent files
2076 treat REG and SUBREG equally, including within
2077 GO_IF_LEGITIMATE_ADDRESS and in all the
2078 predicates since we never verify that replacing the
2079 original register with a SUBREG results in a
2080 recognizable insn. */
2081 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2082 reg_map[m1->regno] = m->set_dest;
2083 else
2084 reg_map[m1->regno]
2085 = gen_lowpart_common (GET_MODE (m1->set_dest),
2086 m->set_dest);
2088 /* Get rid of the matching insn
2089 and prevent further processing of it. */
2090 m1->done = 1;
2092 /* If library call, delete all insns except the last, which
2093 is deleted below. */
2094 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2095 NULL_RTX)))
2097 for (temp = XEXP (temp, 0); temp != m1->insn;
2098 temp = NEXT_INSN (temp))
2099 delete_insn (temp);
2101 delete_insn (m1->insn);
2103 /* Any other movable that loads the same register
2104 MUST be moved. */
2105 already_moved[m1->regno] = 1;
2107 /* The reg merged here is now invariant,
2108 if the reg it matches is invariant. */
2109 if (! m->partial)
2110 regs->array[m1->regno].set_in_loop = 0;
2113 else if (loop_dump_stream)
2114 fprintf (loop_dump_stream, "not desirable");
2116 else if (loop_dump_stream && !m->match)
2117 fprintf (loop_dump_stream, "not safe");
2119 if (loop_dump_stream)
2120 fprintf (loop_dump_stream, "\n");
2123 if (new_start == 0)
2124 new_start = loop_start;
2126 /* Go through all the instructions in the loop, making
2127 all the register substitutions scheduled in REG_MAP. */
2128 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2129 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2130 || GET_CODE (p) == CALL_INSN)
2132 replace_regs (PATTERN (p), reg_map, nregs, 0);
2133 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2134 INSN_CODE (p) = -1;
2137 /* Clean up. */
2138 free (reg_map);
2139 free (already_moved);
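/* A worked instance of the desirability test above, with hypothetical
   numbers: given a THRESHOLD of 6, an insn with SAVINGS 1 whose register
   has LIFETIME 20 scores 6 * 1 * 20 = 120, so it is moved when the loop
   contains at most 120 insns.  If the register was already moved out of
   an inner loop the right-hand side doubles to 2 * INSN_COUNT (the dump
   reports this as "halved since already moved"), and each successful
   move lowers THRESHOLD by 3, making later candidates progressively
   less attractive.  */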
2143 static void
2144 loop_movables_add (movables, m)
2145 struct loop_movables *movables;
2146 struct movable *m;
2148 if (movables->head == 0)
2149 movables->head = m;
2150 else
2151 movables->last->next = m;
2152 movables->last = m;
2156 static void
2157 loop_movables_free (movables)
2158 struct loop_movables *movables;
2160 struct movable *m;
2161 struct movable *m_next;
2163 for (m = movables->head; m; m = m_next)
2165 m_next = m->next;
2166 free (m);
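/* Sketch of the list discipline (hypothetical caller, not compiled):
   movables are allocated individually, appended in order with
   loop_movables_add, and released in one sweep by loop_movables_free.  */
#if 0
{
  struct loop_movables movables;
  struct movable *m;

  movables.head = 0;
  movables.last = 0;

  m = (struct movable *) xmalloc (sizeof (struct movable));
  memset (m, 0, sizeof (struct movable));
  loop_movables_add (&movables, m);
  /* ... record further movables during the scan ...  */
  loop_movables_free (&movables);
}
#endif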
2170 #if 0
2171 /* Scan X and replace the address of any MEM in it with ADDR.
2172 REG is the address that MEM should have before the replacement. */
2174 static void
2175 replace_call_address (x, reg, addr)
2176 rtx x, reg, addr;
2178 register enum rtx_code code;
2179 register int i;
2180 register const char *fmt;
2182 if (x == 0)
2183 return;
2184 code = GET_CODE (x);
2185 switch (code)
2187 case PC:
2188 case CC0:
2189 case CONST_INT:
2190 case CONST_DOUBLE:
2191 case CONST:
2192 case SYMBOL_REF:
2193 case LABEL_REF:
2194 case REG:
2195 return;
2197 case SET:
2198 /* Short cut for very common case. */
2199 replace_call_address (XEXP (x, 1), reg, addr);
2200 return;
2202 case CALL:
2203 /* Short cut for very common case. */
2204 replace_call_address (XEXP (x, 0), reg, addr);
2205 return;
2207 case MEM:
2208 /* If this MEM uses a reg other than the one we expected,
2209 something is wrong. */
2210 if (XEXP (x, 0) != reg)
2211 abort ();
2212 XEXP (x, 0) = addr;
2213 return;
2215 default:
2216 break;
2219 fmt = GET_RTX_FORMAT (code);
2220 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2222 if (fmt[i] == 'e')
2223 replace_call_address (XEXP (x, i), reg, addr);
2224 else if (fmt[i] == 'E')
2226 register int j;
2227 for (j = 0; j < XVECLEN (x, i); j++)
2228 replace_call_address (XVECEXP (x, i, j), reg, addr);
2232 #endif
2234 /* Return the number of memory refs to addresses that vary
2235 in the rtx X. */
2237 static int
2238 count_nonfixed_reads (loop, x)
2239 const struct loop *loop;
2240 rtx x;
2242 register enum rtx_code code;
2243 register int i;
2244 register const char *fmt;
2245 int value;
2247 if (x == 0)
2248 return 0;
2250 code = GET_CODE (x);
2251 switch (code)
2253 case PC:
2254 case CC0:
2255 case CONST_INT:
2256 case CONST_DOUBLE:
2257 case CONST:
2258 case SYMBOL_REF:
2259 case LABEL_REF:
2260 case REG:
2261 return 0;
2263 case MEM:
2264 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2265 + count_nonfixed_reads (loop, XEXP (x, 0)));
2267 default:
2268 break;
2271 value = 0;
2272 fmt = GET_RTX_FORMAT (code);
2273 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2275 if (fmt[i] == 'e')
2276 value += count_nonfixed_reads (loop, XEXP (x, i));
2277 if (fmt[i] == 'E')
2279 register int j;
2280 for (j = 0; j < XVECLEN (x, i); j++)
2281 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2284 return value;
2287 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2288 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2289 `unknown_address_altered', `unknown_constant_address_altered', and
2290 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2291 list `store_mems' in LOOP. */
2293 static void
2294 prescan_loop (loop)
2295 struct loop *loop;
2297 register int level = 1;
2298 rtx insn;
2299 struct loop_info *loop_info = LOOP_INFO (loop);
2300 rtx start = loop->start;
2301 rtx end = loop->end;
2302 /* The label after END. Jumping here is just like falling off the
2303 end of the loop. We use next_nonnote_insn instead of next_label
2304 as a hedge against the (pathological) case where some actual insn
2305 might end up between the two. */
2306 rtx exit_target = next_nonnote_insn (end);
2308 loop_info->has_indirect_jump = indirect_jump_in_function;
2309 loop_info->pre_header_has_call = 0;
2310 loop_info->has_call = 0;
2311 loop_info->has_nonconst_call = 0;
2312 loop_info->has_volatile = 0;
2313 loop_info->has_tablejump = 0;
2314 loop_info->has_multiple_exit_targets = 0;
2315 loop->level = 1;
2317 loop_info->unknown_address_altered = 0;
2318 loop_info->unknown_constant_address_altered = 0;
2319 loop_info->store_mems = NULL_RTX;
2320 loop_info->first_loop_store_insn = NULL_RTX;
2321 loop_info->mems_idx = 0;
2322 loop_info->num_mem_sets = 0;
2325 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2326 insn = PREV_INSN (insn))
2328 if (GET_CODE (insn) == CALL_INSN)
2330 loop_info->pre_header_has_call = 1;
2331 break;
2335 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2336 insn = NEXT_INSN (insn))
2338 if (GET_CODE (insn) == NOTE)
2340 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2342 ++level;
2343 /* Count number of loops contained in this one. */
2344 loop->level++;
2346 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2348 --level;
2351 else if (GET_CODE (insn) == CALL_INSN)
2353 if (! CONST_CALL_P (insn))
2355 loop_info->unknown_address_altered = 1;
2356 loop_info->has_nonconst_call = 1;
2358 loop_info->has_call = 1;
2360 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2362 rtx label1 = NULL_RTX;
2363 rtx label2 = NULL_RTX;
2365 if (volatile_refs_p (PATTERN (insn)))
2366 loop_info->has_volatile = 1;
2368 if (GET_CODE (insn) == JUMP_INSN
2369 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2370 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2371 loop_info->has_tablejump = 1;
2373 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2374 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2375 loop_info->first_loop_store_insn = insn;
2377 if (! loop_info->has_multiple_exit_targets
2378 && GET_CODE (insn) == JUMP_INSN
2379 && GET_CODE (PATTERN (insn)) == SET
2380 && SET_DEST (PATTERN (insn)) == pc_rtx)
2382 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2384 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2385 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2387 else
2389 label1 = SET_SRC (PATTERN (insn));
2394 if (label1 && label1 != pc_rtx)
2396 if (GET_CODE (label1) != LABEL_REF)
2398 /* Something tricky. */
2399 loop_info->has_multiple_exit_targets = 1;
2400 break;
2402 else if (XEXP (label1, 0) != exit_target
2403 && LABEL_OUTSIDE_LOOP_P (label1))
2405 /* A jump outside the current loop. */
2406 loop_info->has_multiple_exit_targets = 1;
2407 break;
2411 label1 = label2;
2412 label2 = NULL_RTX;
2414 while (label1);
2417 else if (GET_CODE (insn) == RETURN)
2418 loop_info->has_multiple_exit_targets = 1;
2421 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2422 if (/* An exception thrown by a called function might land us
2423 anywhere. */
2424 ! loop_info->has_nonconst_call
2425 /* We don't want loads for MEMs moved to a location before the
2426 one at which their stack memory becomes allocated. (Note
2427 that this is not a problem for malloc, etc., since those
2428 require actual function calls.) */
2429 && ! current_function_calls_alloca
2430 /* There are ways to leave the loop other than falling off the
2431 end. */
2432 && ! loop_info->has_multiple_exit_targets)
2433 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2434 insn = NEXT_INSN (insn))
2435 for_each_rtx (&insn, insert_loop_mem, loop_info);
2437 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2438 that loop_invariant_p and load_mems can use true_dependence
2439 to determine what is really clobbered. */
2440 if (loop_info->unknown_address_altered)
2442 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2444 loop_info->store_mems
2445 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2447 if (loop_info->unknown_constant_address_altered)
2449 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2451 RTX_UNCHANGING_P (mem) = 1;
2452 loop_info->store_mems
2453 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
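/* The BLKmode MEMs added above act as wild cards: a (mem:BLK (const_int 0))
   is taken to overlap any address, so true_dependence reports a conflict
   against every load, which is exactly what "unknown address altered"
   must mean.  A minimal sketch of the idea (hypothetical, not compiled):  */
#if 0
{
  rtx wildcard = gen_rtx_MEM (BLKmode, const0_rtx);
  rtx load = gen_rtx_MEM (SImode, stack_pointer_rtx);

  /* Asks: might a store through WILDCARD reach LOAD?  For a BLKmode
     MEM at an unknown address the answer is always yes, so LOAD is
     conservatively treated as clobbered inside the loop.  */
  if (true_dependence (wildcard, VOIDmode, load, rtx_varies_p))
    ;
}
#endif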
2457 /* Scan the function looking for loops. Record the start and end of each loop.
2458 Also mark as invalid any loops that contain a setjmp or are branched
2459 to from outside the loop. */
2461 static void
2462 find_and_verify_loops (f, loops)
2463 rtx f;
2464 struct loops *loops;
2466 rtx insn;
2467 rtx label;
2468 int num_loops;
2469 struct loop *current_loop;
2470 struct loop *next_loop;
2471 struct loop *loop;
2473 num_loops = loops->num;
2475 compute_luids (f, NULL_RTX, 0);
2477 /* If there are jumps to undefined labels,
2478 treat them as jumps out of any/all loops.
2479 This also avoids writing past end of tables when there are no loops. */
2480 uid_loop[0] = NULL;
2482 /* Find boundaries of loops, mark which loops are contained within
2483 loops, and invalidate loops that have setjmp. */
2485 num_loops = 0;
2486 current_loop = NULL;
2487 for (insn = f; insn; insn = NEXT_INSN (insn))
2489 if (GET_CODE (insn) == NOTE)
2490 switch (NOTE_LINE_NUMBER (insn))
2492 case NOTE_INSN_LOOP_BEG:
2493 next_loop = loops->array + num_loops;
2494 next_loop->num = num_loops;
2495 num_loops++;
2496 next_loop->start = insn;
2497 next_loop->outer = current_loop;
2498 current_loop = next_loop;
2499 break;
2501 case NOTE_INSN_SETJMP:
2502 /* In this case, we must invalidate our current loop and any
2503 enclosing loop. */
2504 for (loop = current_loop; loop; loop = loop->outer)
2506 loop->invalid = 1;
2507 if (loop_dump_stream)
2508 fprintf (loop_dump_stream,
2509 "\nLoop at %d ignored due to setjmp.\n",
2510 INSN_UID (loop->start));
2512 break;
2514 case NOTE_INSN_LOOP_CONT:
2515 current_loop->cont = insn;
2516 break;
2518 case NOTE_INSN_LOOP_VTOP:
2519 current_loop->vtop = insn;
2520 break;
2522 case NOTE_INSN_LOOP_END:
2523 if (! current_loop)
2524 abort ();
2526 current_loop->end = insn;
2527 current_loop = current_loop->outer;
2528 break;
2530 default:
2531 break;
2534 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2535 enclosing loop, but this doesn't matter. */
2536 uid_loop[INSN_UID (insn)] = current_loop;
2539 /* Any loop containing a label used in an initializer must be invalidated,
2540 because it can be jumped into from anywhere. */
2542 for (label = forced_labels; label; label = XEXP (label, 1))
2544 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2545 loop; loop = loop->outer)
2546 loop->invalid = 1;
2549 /* Any loop containing a label used for an exception handler must be
2550 invalidated, because it can be jumped into from anywhere. */
2552 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2554 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2555 loop; loop = loop->outer)
2556 loop->invalid = 1;
2559 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2560 loop that it is not contained within, that loop is marked invalid.
2561 If any INSN or CALL_INSN uses a label's address, then the loop containing
2562 that label is marked invalid, because it could be jumped into from
2563 anywhere.
2565 Also look for blocks of code ending in an unconditional branch that
2566 exits the loop. If such a block is surrounded by a conditional
2567 branch around the block, move the block elsewhere (see below) and
2568 invert the jump to point to the code block. This may eliminate a
2569 label in our loop and will simplify processing by both us and a
2570 possible second cse pass. */
2572 for (insn = f; insn; insn = NEXT_INSN (insn))
2573 if (INSN_P (insn))
2575 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2577 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2579 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2580 if (note)
2582 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2583 loop; loop = loop->outer)
2584 loop->invalid = 1;
2588 if (GET_CODE (insn) != JUMP_INSN)
2589 continue;
2591 mark_loop_jump (PATTERN (insn), this_loop);
2593 /* See if this is an unconditional branch outside the loop. */
2594 if (this_loop
2595 && (GET_CODE (PATTERN (insn)) == RETURN
2596 || (any_uncondjump_p (insn)
2597 && onlyjump_p (insn)
2598 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2599 != this_loop)))
2600 && get_max_uid () < max_uid_for_loop)
2602 rtx p;
2603 rtx our_next = next_real_insn (insn);
2604 rtx last_insn_to_move = NEXT_INSN (insn);
2605 struct loop *dest_loop;
2606 struct loop *outer_loop = NULL;
2608 /* Go backwards until we reach the start of the loop, a label,
2609 or a JUMP_INSN. */
2610 for (p = PREV_INSN (insn);
2611 GET_CODE (p) != CODE_LABEL
2612 && ! (GET_CODE (p) == NOTE
2613 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2614 && GET_CODE (p) != JUMP_INSN;
2615 p = PREV_INSN (p))
2618 /* Check for the case where we have a jump to an inner nested
2619 loop, and do not perform the optimization in that case. */
2621 if (JUMP_LABEL (insn))
2623 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2624 if (dest_loop)
2626 for (outer_loop = dest_loop; outer_loop;
2627 outer_loop = outer_loop->outer)
2628 if (outer_loop == this_loop)
2629 break;
2633 /* Make sure that the target of P is within the current loop. */
2635 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2636 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2637 outer_loop = this_loop;
2639 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2640 we have a block of code to try to move.
2642 We look backward and then forward from the target of INSN
2643 to find a BARRIER at the same loop depth as the target.
2644 If we find such a BARRIER, we make a new label for the start
2645 of the block, invert the jump in P and point it to that label,
2646 and move the block of code to the spot we found. */
2648 if (! outer_loop
2649 && GET_CODE (p) == JUMP_INSN
2650 && JUMP_LABEL (p) != 0
2651 /* Just ignore jumps to labels that were never emitted.
2652 These always indicate compilation errors. */
2653 && INSN_UID (JUMP_LABEL (p)) != 0
2654 && any_condjump_p (p) && onlyjump_p (p)
2655 && next_real_insn (JUMP_LABEL (p)) == our_next
2656 /* If it's not safe to move the sequence, then we
2657 mustn't try. */
2658 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2659 &last_insn_to_move))
2661 rtx target
2662 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2663 struct loop *target_loop = uid_loop[INSN_UID (target)];
2664 rtx loc, loc2;
2666 for (loc = target; loc; loc = PREV_INSN (loc))
2667 if (GET_CODE (loc) == BARRIER
2668 /* Don't move things inside a tablejump. */
2669 && ((loc2 = next_nonnote_insn (loc)) == 0
2670 || GET_CODE (loc2) != CODE_LABEL
2671 || (loc2 = next_nonnote_insn (loc2)) == 0
2672 || GET_CODE (loc2) != JUMP_INSN
2673 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2674 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2675 && uid_loop[INSN_UID (loc)] == target_loop)
2676 break;
2678 if (loc == 0)
2679 for (loc = target; loc; loc = NEXT_INSN (loc))
2680 if (GET_CODE (loc) == BARRIER
2681 /* Don't move things inside a tablejump. */
2682 && ((loc2 = next_nonnote_insn (loc)) == 0
2683 || GET_CODE (loc2) != CODE_LABEL
2684 || (loc2 = next_nonnote_insn (loc2)) == 0
2685 || GET_CODE (loc2) != JUMP_INSN
2686 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2687 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2688 && uid_loop[INSN_UID (loc)] == target_loop)
2689 break;
2691 if (loc)
2693 rtx cond_label = JUMP_LABEL (p);
2694 rtx new_label = get_label_after (p);
2696 /* Ensure our label doesn't go away. */
2697 LABEL_NUSES (cond_label)++;
2699 /* Verify that uid_loop is large enough and that
2700 we can invert P. */
2701 if (invert_jump (p, new_label, 1))
2703 rtx q, r;
2705 /* If no suitable BARRIER was found, create a suitable
2706 one before TARGET. Since TARGET is a fall through
2707 path, we'll need to insert a jump around our block
2708 and add a BARRIER before TARGET.
2710 This creates an extra unconditional jump outside
2711 the loop. However, the benefits of removing rarely
2712 executed instructions from inside the loop usually
2713 outweighs the cost of the extra unconditional jump
2714 outside the loop. */
2715 if (loc == 0)
2717 rtx temp;
2719 temp = gen_jump (JUMP_LABEL (insn));
2720 temp = emit_jump_insn_before (temp, target);
2721 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2722 LABEL_NUSES (JUMP_LABEL (insn))++;
2723 loc = emit_barrier_before (target);
2726 /* Include the BARRIER after INSN and copy the
2727 block after LOC. */
2728 new_label = squeeze_notes (new_label,
2729 last_insn_to_move);
2730 reorder_insns (new_label, last_insn_to_move, loc);
2732 /* All those insns are now in TARGET_LOOP. */
2733 for (q = new_label;
2734 q != NEXT_INSN (last_insn_to_move);
2735 q = NEXT_INSN (q))
2736 uid_loop[INSN_UID (q)] = target_loop;
2738 /* The label jumped to by INSN is no longer a loop
2739 exit. Unless INSN does not have a label (e.g.,
2740 it is a RETURN insn), search loop->exit_labels
2741 to find its label_ref, and remove it. Also turn
2742 off LABEL_OUTSIDE_LOOP_P bit. */
2743 if (JUMP_LABEL (insn))
2745 for (q = 0, r = this_loop->exit_labels;
2746 r;
2747 q = r, r = LABEL_NEXTREF (r))
2748 if (XEXP (r, 0) == JUMP_LABEL (insn))
2750 LABEL_OUTSIDE_LOOP_P (r) = 0;
2751 if (q)
2752 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2753 else
2754 this_loop->exit_labels = LABEL_NEXTREF (r);
2755 break;
2758 for (loop = this_loop; loop && loop != target_loop;
2759 loop = loop->outer)
2760 loop->exit_count--;
2762 /* If we didn't find it, then something is
2763 wrong. */
2764 if (! r)
2765 abort ();
2768 /* P is now a jump outside the loop, so it must be put
2769 in loop->exit_labels, and marked as such.
2770 The easiest way to do this is to just call
2771 mark_loop_jump again for P. */
2772 mark_loop_jump (PATTERN (p), this_loop);
2774 /* If INSN now jumps to the insn after it,
2775 delete INSN. */
2776 if (JUMP_LABEL (insn) != 0
2777 && (next_real_insn (JUMP_LABEL (insn))
2778 == next_real_insn (insn)))
2779 delete_insn (insn);
2782 /* Continue the loop after where the conditional
2783 branch used to jump, since the only branch insn
2784 in the block (if it still remains) is an inter-loop
2785 branch and hence needs no processing. */
2786 insn = NEXT_INSN (cond_label);
2788 if (--LABEL_NUSES (cond_label) == 0)
2789 delete_insn (cond_label);
2791 /* This loop will be continued with NEXT_INSN (insn). */
2792 insn = PREV_INSN (insn);
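/* Shape of the block-motion transformation above, shown schematically
   with hypothetical labels.  Before, a conditional branch inside the
   loop skips over a block that unconditionally exits:

	loop:	...
		if (cond) goto L1
		<block>
		goto exit
	L1:	...
		goto loop

   Afterwards the jump is inverted to target a new label and the block
   sits next to a BARRIER outside the loop, removing a label (and a
   rarely executed block) from the loop body:

	loop:	...
		if (! cond) goto new_label
	L1:	...
		goto loop

	new_label:
		<block>
		goto exit  */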
2799 /* If any label in X jumps to a loop different from LOOP and any of the
2800 loops it is contained in, mark the target loop invalid.
2802 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2804 static void
2805 mark_loop_jump (x, loop)
2806 rtx x;
2807 struct loop *loop;
2809 struct loop *dest_loop;
2810 struct loop *outer_loop;
2811 int i;
2813 switch (GET_CODE (x))
2815 case PC:
2816 case USE:
2817 case CLOBBER:
2818 case REG:
2819 case MEM:
2820 case CONST_INT:
2821 case CONST_DOUBLE:
2822 case RETURN:
2823 return;
2825 case CONST:
2826 /* There could be a label reference in here. */
2827 mark_loop_jump (XEXP (x, 0), loop);
2828 return;
2830 case PLUS:
2831 case MINUS:
2832 case MULT:
2833 mark_loop_jump (XEXP (x, 0), loop);
2834 mark_loop_jump (XEXP (x, 1), loop);
2835 return;
2837 case LO_SUM:
2838 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2839 mark_loop_jump (XEXP (x, 1), loop);
2840 return;
2842 case SIGN_EXTEND:
2843 case ZERO_EXTEND:
2844 mark_loop_jump (XEXP (x, 0), loop);
2845 return;
2847 case LABEL_REF:
2848 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2850 /* Link together all labels that branch outside the loop. This
2851 is used by final_[bg]iv_value and the loop unrolling code. Also
2852 mark this LABEL_REF so we know that this branch should predict
2853 false. */
2855 /* A check to make sure the label is not in an inner nested loop,
2856 since this does not count as a loop exit. */
2857 if (dest_loop)
2859 for (outer_loop = dest_loop; outer_loop;
2860 outer_loop = outer_loop->outer)
2861 if (outer_loop == loop)
2862 break;
2864 else
2865 outer_loop = NULL;
2867 if (loop && ! outer_loop)
2869 LABEL_OUTSIDE_LOOP_P (x) = 1;
2870 LABEL_NEXTREF (x) = loop->exit_labels;
2871 loop->exit_labels = x;
2873 for (outer_loop = loop;
2874 outer_loop && outer_loop != dest_loop;
2875 outer_loop = outer_loop->outer)
2876 outer_loop->exit_count++;
2879 /* If this is inside a loop, but not in the current loop or one enclosed
2880 by it, it invalidates at least one loop. */
2882 if (! dest_loop)
2883 return;
2885 /* We must invalidate every nested loop containing the target of this
2886 label, except those that also contain the jump insn. */
2888 for (; dest_loop; dest_loop = dest_loop->outer)
2890 /* Stop when we reach a loop that also contains the jump insn. */
2891 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2892 if (dest_loop == outer_loop)
2893 return;
2895 /* If we get here, we know we need to invalidate a loop. */
2896 if (loop_dump_stream && ! dest_loop->invalid)
2897 fprintf (loop_dump_stream,
2898 "\nLoop at %d ignored due to multiple entry points.\n",
2899 INSN_UID (dest_loop->start));
2901 dest_loop->invalid = 1;
2903 return;
2905 case SET:
2906 /* If this is not setting pc, ignore. */
2907 if (SET_DEST (x) == pc_rtx)
2908 mark_loop_jump (SET_SRC (x), loop);
2909 return;
2911 case IF_THEN_ELSE:
2912 mark_loop_jump (XEXP (x, 1), loop);
2913 mark_loop_jump (XEXP (x, 2), loop);
2914 return;
2916 case PARALLEL:
2917 case ADDR_VEC:
2918 for (i = 0; i < XVECLEN (x, 0); i++)
2919 mark_loop_jump (XVECEXP (x, 0, i), loop);
2920 return;
2922 case ADDR_DIFF_VEC:
2923 for (i = 0; i < XVECLEN (x, 1); i++)
2924 mark_loop_jump (XVECEXP (x, 1, i), loop);
2925 return;
2927 default:
2928 /* Strictly speaking this is not a jump into the loop, only a possible
2929 jump out of the loop. However, we have no way to link the destination
2930 of this jump onto the list of exit labels. To be safe we mark this
2931 loop and any containing loops as invalid. */
2932 if (loop)
2934 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2936 if (loop_dump_stream && ! outer_loop->invalid)
2937 fprintf (loop_dump_stream,
2938 "\nLoop at %d ignored due to unknown exit jump.\n",
2939 INSN_UID (outer_loop->start));
2940 outer_loop->invalid = 1;
2943 return;
2947 /* Return nonzero if there is a label in the range from
2948 insn INSN to and including the insn whose luid is END.
2949 INSN must have an assigned luid (i.e., it must not have
2950 been previously created by loop.c). */
2952 static int
2953 labels_in_range_p (insn, end)
2954 rtx insn;
2955 int end;
2957 while (insn && INSN_LUID (insn) <= end)
2959 if (GET_CODE (insn) == CODE_LABEL)
2960 return 1;
2961 insn = NEXT_INSN (insn);
2964 return 0;
2967 /* Record that a memory reference X is being set. */
2969 static void
2970 note_addr_stored (x, y, data)
2971 rtx x;
2972 rtx y ATTRIBUTE_UNUSED;
2973 void *data ATTRIBUTE_UNUSED;
2975 struct loop_info *loop_info = data;
2977 if (x == 0 || GET_CODE (x) != MEM)
2978 return;
2980 /* Count number of memory writes.
2981 This affects heuristics in strength_reduce. */
2982 loop_info->num_mem_sets++;
2984 /* BLKmode MEM means all memory is clobbered. */
2985 if (GET_MODE (x) == BLKmode)
2987 if (RTX_UNCHANGING_P (x))
2988 loop_info->unknown_constant_address_altered = 1;
2989 else
2990 loop_info->unknown_address_altered = 1;
2992 return;
2995 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
2996 loop_info->store_mems);
2999 /* X is a value modified by an INSN that references a biv inside a loop
3000 exit test (i.e., X is somehow related to the value of the biv). If X
3001 is a pseudo that is used more than once, then the biv is (effectively)
3002 used more than once. DATA is a pointer to a loop_regs structure. */
3004 static void
3005 note_set_pseudo_multiple_uses (x, y, data)
3006 rtx x;
3007 rtx y ATTRIBUTE_UNUSED;
3008 void *data;
3010 struct loop_regs *regs = (struct loop_regs *) data;
3012 if (x == 0)
3013 return;
3015 while (GET_CODE (x) == STRICT_LOW_PART
3016 || GET_CODE (x) == SIGN_EXTRACT
3017 || GET_CODE (x) == ZERO_EXTRACT
3018 || GET_CODE (x) == SUBREG)
3019 x = XEXP (x, 0);
3021 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3022 return;
3024 /* If we do not have usage information, or if we know the register
3025 is used more than once, note that fact for check_dbra_loop. */
3026 if (REGNO (x) >= max_reg_before_loop
3027 || ! regs->array[REGNO (x)].single_usage
3028 || regs->array[REGNO (x)].single_usage == const0_rtx)
3029 regs->multiple_uses = 1;
3032 /* Return nonzero if the rtx X is invariant over the current loop.
3034 The value is 2 if we refer to something only conditionally invariant.
3036 A memory ref is invariant if it is not volatile and does not conflict
3037 with anything stored in `loop_info->store_mems'. */
3039 int
3040 loop_invariant_p (loop, x)
3041 const struct loop *loop;
3042 register rtx x;
3044 struct loop_info *loop_info = LOOP_INFO (loop);
3045 struct loop_regs *regs = LOOP_REGS (loop);
3046 register int i;
3047 register enum rtx_code code;
3048 register const char *fmt;
3049 int conditional = 0;
3050 rtx mem_list_entry;
3052 if (x == 0)
3053 return 1;
3054 code = GET_CODE (x);
3055 switch (code)
3057 case CONST_INT:
3058 case CONST_DOUBLE:
3059 case SYMBOL_REF:
3060 case CONST:
3061 return 1;
3063 case LABEL_REF:
3064 /* A LABEL_REF is normally invariant, however, if we are unrolling
3065 loops, and this label is inside the loop, then it isn't invariant.
3066 This is because each unrolled copy of the loop body will have
3067 a copy of this label. If this was invariant, then an insn loading
3068 the address of this label into a register might get moved outside
3069 the loop, and then each loop body would end up using the same label.
3071 We don't know the loop bounds here though, so just fail for all
3072 labels. */
3073 if (flag_unroll_loops)
3074 return 0;
3075 else
3076 return 1;
3078 case PC:
3079 case CC0:
3080 case UNSPEC_VOLATILE:
3081 return 0;
3083 case REG:
3084 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3085 since the reg might be set by initialization within the loop. */
3087 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3088 || x == arg_pointer_rtx)
3089 && ! current_function_has_nonlocal_goto)
3090 return 1;
3092 if (LOOP_INFO (loop)->has_call
3093 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3094 return 0;
3096 if (regs->array[REGNO (x)].set_in_loop < 0)
3097 return 2;
3099 return regs->array[REGNO (x)].set_in_loop == 0;
3101 case MEM:
3102 /* Volatile memory references must be rejected. Do this before
3103 checking for read-only items, so that volatile read-only items
3104 will be rejected also. */
3105 if (MEM_VOLATILE_P (x))
3106 return 0;
3108 /* See if there is any dependence between a store and this load. */
3109 mem_list_entry = loop_info->store_mems;
3110 while (mem_list_entry)
3112 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3113 x, rtx_varies_p))
3114 return 0;
3116 mem_list_entry = XEXP (mem_list_entry, 1);
3119 /* It's not invalidated by a store in memory
3120 but we must still verify the address is invariant. */
3121 break;
3123 case ASM_OPERANDS:
3124 /* Don't mess with insns declared volatile. */
3125 if (MEM_VOLATILE_P (x))
3126 return 0;
3127 break;
3129 default:
3130 break;
3133 fmt = GET_RTX_FORMAT (code);
3134 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3136 if (fmt[i] == 'e')
3138 int tem = loop_invariant_p (loop, XEXP (x, i));
3139 if (tem == 0)
3140 return 0;
3141 if (tem == 2)
3142 conditional = 1;
3144 else if (fmt[i] == 'E')
3146 register int j;
3147 for (j = 0; j < XVECLEN (x, i); j++)
3149 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3150 if (tem == 0)
3151 return 0;
3152 if (tem == 2)
3153 conditional = 1;
3159 return 1 + conditional;
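/* The return value above is a tri-state: 0 means not invariant, 1 means
   invariant, and 2 means only conditionally invariant (it depends on a
   register with set_in_loop < 0).  Callers that care must therefore
   compare against 1 rather than test for non-zero; a sketch (not
   compiled):  */
#if 0
  switch (loop_invariant_p (loop, x))
    {
    case 1:
      /* Unconditionally invariant; safe to hoist outright.  */
      break;
    case 2:
      /* Only conditionally invariant; needs the m->cond handling
	 in move_movables.  */
      break;
    default:
      /* Not invariant.  */
      break;
    }
#endif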
3162 /* Return nonzero if all the insns in the loop that set REG
3163 are INSN and the immediately following insns,
3164 and if each of those insns sets REG in an invariant way
3165 (not counting uses of REG in them).
3167 The value is 2 if some of these insns are only conditionally invariant.
3169 We assume that INSN itself is the first set of REG
3170 and that its source is invariant. */
3172 static int
3173 consec_sets_invariant_p (loop, reg, n_sets, insn)
3174 const struct loop *loop;
3175 int n_sets;
3176 rtx reg, insn;
3178 struct loop_regs *regs = LOOP_REGS (loop);
3179 rtx p = insn;
3180 unsigned int regno = REGNO (reg);
3181 rtx temp;
3182 /* Number of sets we have to insist on finding after INSN. */
3183 int count = n_sets - 1;
3184 int old = regs->array[regno].set_in_loop;
3185 int value = 0;
3186 int this;
3188 /* If N_SETS hit the limit, we can't rely on its value. */
3189 if (n_sets == 127)
3190 return 0;
3192 regs->array[regno].set_in_loop = 0;
3194 while (count > 0)
3196 register enum rtx_code code;
3197 rtx set;
3199 p = NEXT_INSN (p);
3200 code = GET_CODE (p);
3202 /* If library call, skip to end of it. */
3203 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3204 p = XEXP (temp, 0);
3206 this = 0;
3207 if (code == INSN
3208 && (set = single_set (p))
3209 && GET_CODE (SET_DEST (set)) == REG
3210 && REGNO (SET_DEST (set)) == regno)
3212 this = loop_invariant_p (loop, SET_SRC (set));
3213 if (this != 0)
3214 value |= this;
3215 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3217 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3218 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3219 notes are OK. */
3220 this = (CONSTANT_P (XEXP (temp, 0))
3221 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3222 && loop_invariant_p (loop, XEXP (temp, 0))));
3223 if (this != 0)
3224 value |= this;
3227 if (this != 0)
3228 count--;
3229 else if (code != NOTE)
3231 regs->array[regno].set_in_loop = old;
3232 return 0;
3236 regs->array[regno].set_in_loop = old;
3237 /* If loop_invariant_p ever returned 2, we return 2. */
3238 return 1 + (value & 2);
3241 #if 0
3242 /* I don't think this condition is sufficient to allow INSN
3243 to be moved, so we no longer test it. */
3245 /* Return 1 if all insns in the basic block of INSN and following INSN
3246 that set REG are invariant according to TABLE. */
3248 static int
3249 all_sets_invariant_p (reg, insn, table)
3250 rtx reg, insn;
3251 short *table;
3253 register rtx p = insn;
3254 register int regno = REGNO (reg);
3256 while (1)
3258 register enum rtx_code code;
3259 p = NEXT_INSN (p);
3260 code = GET_CODE (p);
3261 if (code == CODE_LABEL || code == JUMP_INSN)
3262 return 1;
3263 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3264 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3265 && REGNO (SET_DEST (PATTERN (p))) == regno)
3267 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3268 return 0;
3272 #endif /* 0 */
3274 /* Look at all uses (not sets) of registers in X. For each, if it is
3275 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3276 a different insn, set USAGE[REGNO] to const0_rtx. */
3278 static void
3279 find_single_use_in_loop (regs, insn, x)
3280 struct loop_regs *regs;
3281 rtx insn;
3282 rtx x;
3284 enum rtx_code code = GET_CODE (x);
3285 const char *fmt = GET_RTX_FORMAT (code);
3286 int i, j;
3288 if (code == REG)
3289 regs->array[REGNO (x)].single_usage
3290 = (regs->array[REGNO (x)].single_usage != 0
3291 && regs->array[REGNO (x)].single_usage != insn)
3292 ? const0_rtx : insn;
3294 else if (code == SET)
3296 /* Don't count SET_DEST if it is a REG; otherwise count things
3297 in SET_DEST because if a register is partially modified, it won't
3298 show up as a potential movable so we don't care how USAGE is set
3299 for it. */
3300 if (GET_CODE (SET_DEST (x)) != REG)
3301 find_single_use_in_loop (regs, insn, SET_DEST (x));
3302 find_single_use_in_loop (regs, insn, SET_SRC (x));
3304 else
3305 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3307 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3308 find_single_use_in_loop (regs, insn, XEXP (x, i));
3309 else if (fmt[i] == 'E')
3310 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3311 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
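/* single_usage is a three-valued sentinel: 0 means no use seen yet,
   const0_rtx means "used in more than one insn", and anything else is
   the unique insn that uses the register.  Reading it back looks like
   this (sketch, not compiled; REGNO and SINGLE_USER are placeholders):  */
#if 0
  if (regs->array[regno].single_usage != 0
      && regs->array[regno].single_usage != const0_rtx)
    /* The register is used exactly once, in this insn.  */
    single_user = regs->array[regno].single_usage;
#endif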
3315 /* Count and record any set in X which is contained in INSN. Update
3316 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3317 in X. */
3319 static void
3320 count_one_set (regs, insn, x, last_set)
3321 struct loop_regs *regs;
3322 rtx insn, x;
3323 rtx *last_set;
3325 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3326 /* Don't move a reg that has an explicit clobber.
3327 It's not worth the pain to try to do it correctly. */
3328 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3330 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3332 rtx dest = SET_DEST (x);
3333 while (GET_CODE (dest) == SUBREG
3334 || GET_CODE (dest) == ZERO_EXTRACT
3335 || GET_CODE (dest) == SIGN_EXTRACT
3336 || GET_CODE (dest) == STRICT_LOW_PART)
3337 dest = XEXP (dest, 0);
3338 if (GET_CODE (dest) == REG)
3340 register int regno = REGNO (dest);
3341 /* If this is the first setting of this reg
3342 in current basic block, and it was set before,
3343 it must be set in two basic blocks, so it cannot
3344 be moved out of the loop. */
3345 if (regs->array[regno].set_in_loop > 0
3346 && last_set == 0)
3347 regs->array[regno].may_not_optimize = 1;
3348 /* If this is not first setting in current basic block,
3349 see if reg was used in between previous one and this.
3350 If so, neither one can be moved. */
3351 if (last_set[regno] != 0
3352 && reg_used_between_p (dest, last_set[regno], insn))
3353 regs->array[regno].may_not_optimize = 1;
3354 if (regs->array[regno].set_in_loop < 127)
3355 ++regs->array[regno].set_in_loop;
3356 last_set[regno] = insn;
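/* Note that set_in_loop saturates at 127, so the stored count means
   "exactly N sets" only below the ceiling; consec_sets_invariant_p
   above refuses to trust a count of 127 for precisely this reason.  */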
3361 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3362 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3363 contained in insn INSN is used by any insn that precedes INSN in
3364 cyclic order starting from the loop entry point.
3366 We don't want to use INSN_LUID here because if we restrict INSN to those
3367 that have a valid INSN_LUID, it means we cannot move an invariant out
3368 from an inner loop past two loops. */
3370 static int
3371 loop_reg_used_before_p (loop, set, insn)
3372 const struct loop *loop;
3373 rtx set, insn;
3375 rtx reg = SET_DEST (set);
3376 rtx p;
3378 /* Scan forward checking for register usage. If we hit INSN, we
3379 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3380 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3382 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3383 return 1;
3385 if (p == loop->end)
3386 p = loop->start;
3389 return 0;
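/* In other words, for a rotated loop entered at SCAN_START the notion
   of "precedes INSN" wraps around the loop boundary: the scan covers
   SCAN_START .. END, then START .. INSN.  */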
3392 /* A "basic induction variable" or biv is a pseudo reg that is set
3393 (within this loop) only by incrementing or decrementing it. */
3394 /* A "general induction variable" or giv is a pseudo reg whose
3395 value is a linear function of a biv. */
3397 /* Bivs are recognized by `basic_induction_var';
3398 Givs by `general_induction_var'. */
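/* A source-level example (hypothetical, for illustration only): in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter i is a biv, since the loop sets it only by i = i + 1,
   while the address of a[i], computed as a + 4*i on a target with
   4-byte ints, is a giv with mult_val 4 and add_val a.  */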
3400 /* Communication with routines called via `note_stores'. */
3402 static rtx note_insn;
3404 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3406 static rtx addr_placeholder;
3408 /* ??? Unfinished optimizations, and possible future optimizations,
3409 for the strength reduction code. */
3411 /* ??? The interaction of biv elimination, and recognition of 'constant'
3412 bivs, may cause problems. */
3414 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3415 performance problems.
3417 Perhaps don't eliminate things that can be combined with an addressing
3418 mode. Find all givs that have the same biv, mult_val, and add_val;
3419 then for each giv, check to see if its only use dies in a following
3420 memory address. If so, generate a new memory address and check to see
3421 if it is valid. If it is valid, then store the modified memory address,
3422 otherwise, mark the giv as not done so that it will get its own iv. */
3424 /* ??? Could try to optimize branches when it is known that a biv is always
3425 positive. */
3427 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3428 giv so that an optimized branch can still be recognized by the combiner,
3429 e.g. the VAX acb insn. */
3431 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3432 was rerun in loop_optimize whenever a register was added or moved.
3433 Also, some of the optimizations could be a little less conservative. */
3435 /* Scan the loop body and call FNCALL for each insn. In addition to the
3436 LOOP and INSN parameters pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3437 callback.
3439 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
3440 for every loop iteration except for the last one.
3442 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
3443 loop iteration. */
3445 void
3446 for_each_insn_in_loop (loop, fncall)
3447 struct loop *loop;
3448 loop_insn_callback fncall;
3450 /* This is 1 if current insn is not executed at least once for every loop
3451 iteration. */
3452 int not_every_iteration = 0;
3453 int maybe_multiple = 0;
3454 int past_loop_latch = 0;
3455 int loop_depth = 0;
3456 rtx p;
3458 /* If loop_scan_start points to the loop exit test, we have to be wary of
3459 subversive use of gotos inside expression statements. */
3460 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3461 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3463 /* Scan through the loop, calling FNCALL for each insn. */
3465 for (p = next_insn_in_loop (loop, loop->scan_start);
3466 p != NULL_RTX;
3467 p = next_insn_in_loop (loop, p))
3469 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3471 /* Past CODE_LABEL, we get to insns that may be executed multiple
3472 times. The only way we can be sure that they can't is if every
3473 jump insn between here and the end of the loop either
3474 returns, exits the loop, is a jump to a location that is still
3475 behind the label, or is a jump to the loop start. */
3477 if (GET_CODE (p) == CODE_LABEL)
3479 rtx insn = p;
3481 maybe_multiple = 0;
3483 while (1)
3485 insn = NEXT_INSN (insn);
3486 if (insn == loop->scan_start)
3487 break;
3488 if (insn == loop->end)
3490 if (loop->top != 0)
3491 insn = loop->top;
3492 else
3493 break;
3494 if (insn == loop->scan_start)
3495 break;
3498 if (GET_CODE (insn) == JUMP_INSN
3499 && GET_CODE (PATTERN (insn)) != RETURN
3500 && (!any_condjump_p (insn)
3501 || (JUMP_LABEL (insn) != 0
3502 && JUMP_LABEL (insn) != loop->scan_start
3503 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3505 maybe_multiple = 1;
3506 break;
3511 /* Past a jump, we get to insns for which we can't count
3512 on whether they will be executed during each iteration. */
3513 /* This code appears twice in strength_reduce. There is also similar
3514 code in scan_loop. */
3515 if (GET_CODE (p) == JUMP_INSN
3516 /* If we enter the loop in the middle, and scan around to the
3517 beginning, don't set not_every_iteration for that.
3518 This can be any kind of jump, since we want to know if insns
3519 will be executed if the loop is executed. */
3520 && !(JUMP_LABEL (p) == loop->top
3521 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3522 && any_uncondjump_p (p))
3523 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3525 rtx label = 0;
3527 /* If this is a jump outside the loop, then it also doesn't
3528 matter. Check to see if the target of this branch is on the
3529 loop->exit_labels list. */
3531 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3532 if (XEXP (label, 0) == JUMP_LABEL (p))
3533 break;
3535 if (!label)
3536 not_every_iteration = 1;
3539 else if (GET_CODE (p) == NOTE)
3541 /* At the virtual top of a converted loop, insns are again known to
3542 be executed each iteration: logically, the loop begins here
3543 even though the exit code has been duplicated.
3545 Insns are also again known to be executed each iteration at
3546 the LOOP_CONT note. */
3547 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3548 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3549 && loop_depth == 0)
3550 not_every_iteration = 0;
3551 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3552 loop_depth++;
3553 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3554 loop_depth--;
3557 /* Note if we pass a loop latch. If we do, then we cannot clear
3558 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3559 a loop since a jump before the last CODE_LABEL may have started
3560 a new loop iteration.
3562 Note that LOOP_TOP is only set for rotated loops and we need
3563 this check for all loops, so compare against the CODE_LABEL
3564 which immediately follows LOOP_START. */
3565 if (GET_CODE (p) == JUMP_INSN
3566 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3567 past_loop_latch = 1;
3569 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3570 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3571 or not an insn is known to be executed each iteration of the
3572 loop, whether or not any iterations are known to occur.
3574 Therefore, if we have just passed a label and have no more labels
3575 between here and the test insn of the loop, and we have not passed
3576 a jump to the top of the loop, then we know these insns will be
3577 executed each iteration. */
3579 if (not_every_iteration
3580 && !past_loop_latch
3581 && GET_CODE (p) == CODE_LABEL
3582 && no_labels_between_p (p, loop->end)
3583 && loop_insn_first_p (p, loop->cont))
3584 not_every_iteration = 0;
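/* Sketch of a callback for the iterator above (hypothetical, not
   compiled; see check_insn_for_bivs and check_insn_for_givs for the
   real clients).  The callback returns the insn at which scanning
   should resume, which lets it skip insns it has already consumed:  */
#if 0
static rtx
example_callback (loop, insn, not_every_iteration, maybe_multiple)
     struct loop *loop ATTRIBUTE_UNUSED;
     rtx insn;
     int not_every_iteration ATTRIBUTE_UNUSED;
     int maybe_multiple ATTRIBUTE_UNUSED;
{
  /* Examine INSN here.  */
  return insn;
}

/* Invoked as: for_each_insn_in_loop (loop, example_callback);  */
#endif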
3588 static void
3589 loop_bivs_find (loop)
3590 struct loop *loop;
3592 struct loop_regs *regs = LOOP_REGS (loop);
3593 struct loop_ivs *ivs = LOOP_IVS (loop);
3594 /* Temporary list pointers for traversing ivs->list. */
3595 struct iv_class *bl, **backbl;
3597 ivs->list = 0;
3599 for_each_insn_in_loop (loop, check_insn_for_bivs);
3601 /* Scan ivs->list to remove all regs that proved not to be bivs.
3602 Make a sanity check against regs->n_times_set. */
3603 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
3605 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3606 /* Above happens if register modified by subreg, etc. */
3607 /* Make sure it is not recognized as a basic induction var: */
3608 || regs->array[bl->regno].n_times_set != bl->biv_count
3609 /* If never incremented, it is invariant that we decided not to
3610 move. So leave it alone. */
3611 || ! bl->incremented)
3613 if (loop_dump_stream)
3614 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
3615 bl->regno,
3616 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3617 ? "not induction variable"
3618 : (! bl->incremented ? "never incremented"
3619 : "count error")));
3621 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
3622 *backbl = bl->next;
3624 else
3626 backbl = &bl->next;
3628 if (loop_dump_stream)
3629 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
3635 /* Determine how BIVS are initialised by looking through the pre-header
3636 extended basic block. */
3637 static void
3638 loop_bivs_init_find (loop)
3639 struct loop *loop;
3641 struct loop_ivs *ivs = LOOP_IVS (loop);
3642 /* Temporary list pointers for traversing ivs->list. */
3643 struct iv_class *bl;
3644 int call_seen;
3645 rtx p;
3647 /* Find initial value for each biv by searching backwards from loop_start,
3648 halting at first label. Also record any test condition. */
3650 call_seen = 0;
3651 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3653 rtx test;
3655 note_insn = p;
3657 if (GET_CODE (p) == CALL_INSN)
3658 call_seen = 1;
3660 if (INSN_P (p))
3661 note_stores (PATTERN (p), record_initial, ivs);
3663 /* Record any test of a biv that branches around the loop if there is
3664 no store between it and the start of the loop. We only care about tests with
3665 constants and registers and only certain of those. */
3666 if (GET_CODE (p) == JUMP_INSN
3667 && JUMP_LABEL (p) != 0
3668 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
3669 && (test = get_condition_for_loop (loop, p)) != 0
3670 && GET_CODE (XEXP (test, 0)) == REG
3671 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3672 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
3673 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
3674 && bl->init_insn == 0)
3676 /* If an NE test, we have an initial value! */
3677 if (GET_CODE (test) == NE)
3679 bl->init_insn = p;
3680 bl->init_set = gen_rtx_SET (VOIDmode,
3681 XEXP (test, 0), XEXP (test, 1));
3683 else
3684 bl->initial_test = test;
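/* Illustration only: for source like

       i = 4;
       do { ... i += 3; ... } while (...);

   the backwards scan above, which starts at loop->start and halts at
   the first CODE_LABEL, sees the store `i = 4' via note_stores and
   record_initial, so the biv class for `i' ends up with init_insn and
   init_set recording an initial value of 4.  */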
3690 /* Look at each biv and see if we can say anything better about its
3691 initial value from any initializing insns set up above. (This is done
3692 in two passes to avoid missing SETs in a PARALLEL.) */
3693 static void
3694 loop_bivs_check (loop)
3695 struct loop *loop;
3697 struct loop_ivs *ivs = LOOP_IVS (loop);
3698 /* Temporary list pointers for traversing ivs->list. */
3699 struct iv_class *bl;
3700 struct iv_class **backbl;
3702 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
3704 rtx src;
3705 rtx note;
3707 if (! bl->init_insn)
3708 continue;
3710 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note whose value
3711 is a constant, use that value. */
3712 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3713 && CONSTANT_P (XEXP (note, 0)))
3714 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3715 && CONSTANT_P (XEXP (note, 0))))
3716 src = XEXP (note, 0);
3717 else
3718 src = SET_SRC (bl->init_set);
3720 if (loop_dump_stream)
3721 fprintf (loop_dump_stream,
3722 "Biv %d: initialized at insn %d: initial value ",
3723 bl->regno, INSN_UID (bl->init_insn));
3725 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3726 || GET_MODE (src) == VOIDmode)
3727 && valid_initial_value_p (src, bl->init_insn,
3728 LOOP_INFO (loop)->pre_header_has_call,
3729 loop->start))
3731 bl->initial_value = src;
3733 if (loop_dump_stream)
3735 print_simple_rtl (loop_dump_stream, src);
3736 fputc ('\n', loop_dump_stream);
3739 /* If we can't say anything better, let the biv keep its
3740 initial value of "itself". */
3741 else if (loop_dump_stream)
3742 fprintf (loop_dump_stream, "is complex\n");
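/* Illustration only (insn and register numbers are made up): given an
   initializing insn of the form

       (insn 10 ... (set (reg 63) (reg 70))
             (expr_list:REG_EQUAL (const_int 12) (nil)))

   the code above prefers the constant 12 from the note over (reg 70)
   as the initial value, provided the mode agrees with the biv's
   register and valid_initial_value_p accepts the value.  */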
3747 /* Search the loop for general induction variables. */
3749 static void
3750 loop_givs_find (loop)
3751 struct loop *loop;
3753 for_each_insn_in_loop (loop, check_insn_for_givs);
3757 /* For each giv for which we still don't know whether or not it is
3758 replaceable, check to see if it is replaceable because its final value
3759 can be calculated. */
3761 static void
3762 loop_givs_check (loop)
3763 struct loop *loop;
3765 struct loop_ivs *ivs = LOOP_IVS (loop);
3766 struct iv_class *bl;
3768 for (bl = ivs->list; bl; bl = bl->next)
3770 struct induction *v;
3772 for (v = bl->giv; v; v = v->next_iv)
3773 if (! v->replaceable && ! v->not_replaceable)
3774 check_final_value (loop, v);
3779 /* Return non-zero if it is possible to eliminate the biv BL provided
3780 all givs are reduced. This is possible if either the reg is not
3781 used outside the loop, or we can compute what its final value will
3782 be. */
3784 static int
3785 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
3786 struct loop *loop;
3787 struct iv_class *bl;
3788 int threshold;
3789 int insn_count;
3791 /* For architectures with a decrement_and_branch_until_zero insn,
3792 don't do this if we put a REG_NONNEG note on the endtest for this
3793 biv. */
3795 #ifdef HAVE_decrement_and_branch_until_zero
3796 if (bl->nonneg)
3798 if (loop_dump_stream)
3799 fprintf (loop_dump_stream,
3800 "Cannot eliminate nonneg biv %d.\n", bl->regno);
3801 return 0;
3803 #endif
3805 /* Check that the biv is not used outside the loop, or that its
3806 final value can be computed. Compare against bl->init_insn rather than loop->start. We aren't
3807 concerned with any uses of the biv between init_insn and
3808 loop->start since these won't be affected by the value of the biv
3809 elsewhere in the function, so long as init_insn doesn't use the
3810 biv itself. */
3812 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
3813 && bl->init_insn
3814 && INSN_UID (bl->init_insn) < max_uid_for_loop
3815 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
3816 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3817 || (bl->final_value = final_biv_value (loop, bl)))
3818 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
3820 if (loop_dump_stream)
3822 fprintf (loop_dump_stream,
3823 "Cannot eliminate biv %d.\n",
3824 bl->regno);
3825 fprintf (loop_dump_stream,
3826 "First use: insn %d, last use: insn %d.\n",
3827 REGNO_FIRST_UID (bl->regno),
3828 REGNO_LAST_UID (bl->regno));
3830 return 0;
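/* Illustration only: in

       for (i = 0; i < 100; i++)
         a[i] = 0;

   if `i' is unused after the loop, the first test above succeeds and
   the biv may be eliminated once the address giv is reduced.  If `i'
   were used after the loop, elimination would still be possible when
   final_biv_value can compute the exit value (here, 100).  */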
3834 /* Reduce each giv of BL that we have decided to reduce. */
3836 static void
3837 loop_givs_reduce (loop, bl)
3838 struct loop *loop;
3839 struct iv_class *bl;
3841 struct induction *v;
3843 for (v = bl->giv; v; v = v->next_iv)
3845 struct induction *tv;
3846 if (! v->ignore && v->same == 0)
3848 int auto_inc_opt = 0;
3850 /* If the code for derived givs immediately below has already
3851 allocated a new_reg, we must keep it. */
3852 if (! v->new_reg)
3853 v->new_reg = gen_reg_rtx (v->mode);
3855 #ifdef AUTO_INC_DEC
3856 /* If the target has auto-increment addressing modes, and
3857 this is an address giv, then try to put the increment
3858 immediately after its use, so that flow can create an
3859 auto-increment addressing mode. */
3860 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
3861 && bl->biv->always_executed && ! bl->biv->maybe_multiple
3862 /* We don't handle reversed biv's because bl->biv->insn
3863 does not have a valid INSN_LUID. */
3864 && ! bl->reversed
3865 && v->always_executed && ! v->maybe_multiple
3866 && INSN_UID (v->insn) < max_uid_for_loop)
3868 /* If other giv's have been combined with this one, then
3869 this will work only if all uses of the other giv's occur
3870 before this giv's insn. This is difficult to check.
3872 We simplify this by looking for the common case where
3873 there is one DEST_REG giv, and this giv's insn is the
3874 last use of the dest_reg of that DEST_REG giv. If the
3875 increment occurs after the address giv, then we can
3876 perform the optimization. (Otherwise, the increment
3877 would have to go before other_giv, and we would not be
3878 able to combine it with the address giv to get an
3879 auto-inc address.) */
3880 if (v->combined_with)
3882 struct induction *other_giv = 0;
3884 for (tv = bl->giv; tv; tv = tv->next_iv)
3885 if (tv->same == v)
3887 if (other_giv)
3888 break;
3889 else
3890 other_giv = tv;
3892 if (! tv && other_giv
3893 && REGNO (other_giv->dest_reg) < max_reg_before_loop
3894 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
3895 == INSN_UID (v->insn))
3896 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
3897 auto_inc_opt = 1;
3899 /* Check for case where increment is before the address
3900 giv. Do this test in "loop order". */
3901 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
3902 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3903 || (INSN_LUID (bl->biv->insn)
3904 > INSN_LUID (loop->scan_start))))
3905 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3906 && (INSN_LUID (loop->scan_start)
3907 < INSN_LUID (bl->biv->insn))))
3908 auto_inc_opt = -1;
3909 else
3910 auto_inc_opt = 1;
3912 #ifdef HAVE_cc0
3914 rtx prev;
3916 /* We can't put an insn immediately after one setting
3917 cc0, or immediately before one using cc0. */
3918 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
3919 || (auto_inc_opt == -1
3920 && (prev = prev_nonnote_insn (v->insn)) != 0
3921 && INSN_P (prev)
3922 && sets_cc0_p (PATTERN (prev))))
3923 auto_inc_opt = 0;
3925 #endif
3927 if (auto_inc_opt)
3928 v->auto_inc_opt = 1;
3930 #endif
3932 /* For each place where the biv is incremented, add an insn
3933 to increment the new, reduced reg for the giv. */
3934 for (tv = bl->biv; tv; tv = tv->next_iv)
3936 rtx insert_before;
3938 if (! auto_inc_opt)
3939 insert_before = tv->insn;
3940 else if (auto_inc_opt == 1)
3941 insert_before = NEXT_INSN (v->insn);
3942 else
3943 insert_before = v->insn;
3945 if (tv->mult_val == const1_rtx)
3946 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3947 v->new_reg, v->new_reg,
3948 0, insert_before);
3949 else /* tv->mult_val == const0_rtx */
3950 /* A multiply is acceptable here
3951 since this is presumed to be seldom executed. */
3952 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3953 v->add_val, v->new_reg,
3954 0, insert_before);
3957 /* Add code at loop start to initialize giv's reduced reg. */
3959 loop_iv_add_mult_hoist (loop,
3960 extend_value_for_giv (v, bl->initial_value),
3961 v->mult_val, v->add_val, v->new_reg);
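/* Illustration only: for a giv whose value is 4*i + base (mult_val 4,
   add_val `base') in a loop whose biv update is `i += 1', the code
   above allocates a new register g, hoists `g = 4*i0 + base' to the
   loop start, and next to the biv increment emits `g += 4'
   (tv->add_val * v->mult_val), so no multiply remains in the loop
   body.  */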
3967 /* Check for givs whose first use is their definition and whose
3968 last use is the definition of another giv. If so, it is likely
3969 dead and should not be used to derive another giv nor to
3970 eliminate a biv. */
3972 static void
3973 loop_givs_dead_check (loop, bl)
3974 struct loop *loop ATTRIBUTE_UNUSED;
3975 struct iv_class *bl;
3977 struct induction *v;
3979 for (v = bl->giv; v; v = v->next_iv)
3981 if (v->ignore
3982 || (v->same && v->same->ignore))
3983 continue;
3985 if (v->giv_type == DEST_REG
3986 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
3988 struct induction *v1;
3990 for (v1 = bl->giv; v1; v1 = v1->next_iv)
3991 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
3992 v->maybe_dead = 1;
3998 static void
3999 loop_givs_rescan (loop, bl, reg_map)
4000 struct loop *loop;
4001 struct iv_class *bl;
4002 rtx *reg_map;
4004 struct induction *v;
4006 for (v = bl->giv; v; v = v->next_iv)
4008 if (v->same && v->same->ignore)
4009 v->ignore = 1;
4011 if (v->ignore)
4012 continue;
4014 /* Update expression if this was combined, in case other giv was
4015 replaced. */
4016 if (v->same)
4017 v->new_reg = replace_rtx (v->new_reg,
4018 v->same->dest_reg, v->same->new_reg);
4020 /* See if this register is known to be a pointer to something. If
4021 so, see if we can find the alignment. First see if there is a
4022 destination register that is a pointer. If so, this shares the
4023 alignment too. Next see if we can deduce anything from the
4024 computational information. If not, and this is a DEST_ADDR
4025 giv, at least we know that it's a pointer, though we don't know
4026 the alignment. */
4027 if (GET_CODE (v->new_reg) == REG
4028 && v->giv_type == DEST_REG
4029 && REG_POINTER (v->dest_reg))
4030 mark_reg_pointer (v->new_reg,
4031 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4032 else if (GET_CODE (v->new_reg) == REG
4033 && REG_POINTER (v->src_reg))
4035 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4037 if (align == 0
4038 || GET_CODE (v->add_val) != CONST_INT
4039 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4040 align = 0;
4042 mark_reg_pointer (v->new_reg, align);
4044 else if (GET_CODE (v->new_reg) == REG
4045 && GET_CODE (v->add_val) == REG
4046 && REG_POINTER (v->add_val))
4048 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4050 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4051 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4052 align = 0;
4054 mark_reg_pointer (v->new_reg, align);
4056 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4057 mark_reg_pointer (v->new_reg, 0);
4059 if (v->giv_type == DEST_ADDR)
4060 /* Store reduced reg as the address in the memref where we found
4061 this giv. */
4062 validate_change (v->insn, v->location, v->new_reg, 0);
4063 else if (v->replaceable)
4065 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4067 else
4069 /* Not replaceable; emit an insn to set the original giv reg from
4070 the reduced giv, same as above. */
4071 loop_insn_emit_after (loop, 0, v->insn,
4072 gen_move_insn (v->dest_reg, v->new_reg));
4075 /* When a loop is reversed, givs which depend on the reversed
4076 biv, and which are live outside the loop, must be set to their
4077 correct final value. This insn is only needed if the giv is
4078 not replaceable. The correct final value is the same as the
4079 value that the giv starts the reversed loop with. */
4080 if (bl->reversed && ! v->replaceable)
4081 loop_iv_add_mult_sink (loop,
4082 extend_value_for_giv (v, bl->initial_value),
4083 v->mult_val, v->add_val, v->dest_reg);
4084 else if (v->final_value)
4085 loop_insn_sink_or_swim (loop,
4086 gen_move_insn (v->dest_reg, v->final_value));
4088 if (loop_dump_stream)
4090 fprintf (loop_dump_stream, "giv at %d reduced to ",
4091 INSN_UID (v->insn));
4092 print_simple_rtl (loop_dump_stream, v->new_reg);
4093 fprintf (loop_dump_stream, "\n");
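/* Illustration only: if the giv insn was `t = 4*i + base' and the giv
   is replaceable, REG_MAP later rewrites every use of `t' in the loop
   to use the reduced register directly; if it is not replaceable, a
   move `t = g' is emitted after the giv insn instead, so any use we
   could not analyze still sees the right value.  */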
4099 static int
4100 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4101 struct loop *loop ATTRIBUTE_UNUSED;
4102 struct iv_class *bl;
4103 struct induction *v;
4104 rtx test_reg;
4106 int add_cost;
4107 int benefit;
4109 benefit = v->benefit;
4110 PUT_MODE (test_reg, v->mode);
4111 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4112 test_reg, test_reg);
4114 /* Reduce benefit if not replaceable, since we will insert a
4115 move-insn to replace the insn that calculates this giv. Don't do
4116 this unless the giv is a user variable, since it will often be
4117 marked non-replaceable because of the duplication of the exit
4118 code outside the loop. In such a case, the copies we insert are
4119 dead and will be deleted. So they don't have a cost. Similar
4120 situations exist. */
4121 /* ??? The new final_[bg]iv_value code does a much better job of
4122 finding replaceable giv's, and hence this code may no longer be
4123 necessary. */
4124 if (! v->replaceable && ! bl->eliminable
4125 && REG_USERVAR_P (v->dest_reg))
4126 benefit -= copy_cost;
4128 /* Decrease the benefit to count the add-insns that we will insert
4129 to increment the reduced reg for the giv. ??? This can
4130 overestimate the run-time cost of the additional insns, e.g. if
4131 there are multiple basic blocks that increment the biv, but only
4132 one of these blocks is executed during each iteration. There is
4133 no good way to detect cases like this with the current structure
4134 of the loop optimizer. This code is more accurate for
4135 determining code size than run-time benefits. */
4136 benefit -= add_cost * bl->biv_count;
4138 /* Decide whether to strength-reduce this giv or to leave the code
4139 unchanged (recompute it from the biv each time it is used). This
4140 decision can be made independently for each giv. */
4142 #ifdef AUTO_INC_DEC
4143 /* Attempt to guess whether autoincrement will handle some of the
4144 new add insns; if so, increase BENEFIT (undo the subtraction of
4145 add_cost that was done above). */
4146 if (v->giv_type == DEST_ADDR
4147 /* Increasing the benefit is risky, since this is only a guess.
4148 Avoid increasing register pressure in cases where there would
4149 be no other benefit from reducing this giv. */
4150 && benefit > 0
4151 && GET_CODE (v->mult_val) == CONST_INT)
4153 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4155 if (HAVE_POST_INCREMENT
4156 && INTVAL (v->mult_val) == size)
4157 benefit += add_cost * bl->biv_count;
4158 else if (HAVE_PRE_INCREMENT
4159 && INTVAL (v->mult_val) == size)
4160 benefit += add_cost * bl->biv_count;
4161 else if (HAVE_POST_DECREMENT
4162 && -INTVAL (v->mult_val) == size)
4163 benefit += add_cost * bl->biv_count;
4164 else if (HAVE_PRE_DECREMENT
4165 && -INTVAL (v->mult_val) == size)
4166 benefit += add_cost * bl->biv_count;
4168 #endif
4170 return benefit;
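/* Worked example (all numbers illustrative): with v->benefit == 10,
   copy_cost == 4, add_cost == 2 and bl->biv_count == 2, a
   non-replaceable user-variable giv of a non-eliminable biv is left
   with 10 - 4 - 2*2 == 2; if it is a DEST_ADDR giv whose mult_val
   matches the access size on a HAVE_POST_INCREMENT target, the 4 is
   added back, giving 6.  */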
4174 /* Free IV structures for LOOP. */
4176 static void
4177 loop_ivs_free (loop)
4178 struct loop *loop;
4180 struct loop_ivs *ivs = LOOP_IVS (loop);
4181 struct iv_class *iv = ivs->list;
4183 free (ivs->regs);
4185 while (iv)
4187 struct iv_class *next = iv->next;
4188 struct induction *induction;
4189 struct induction *next_induction;
4191 for (induction = iv->biv; induction; induction = next_induction)
4193 next_induction = induction->next_iv;
4194 free (induction);
4196 for (induction = iv->giv; induction; induction = next_induction)
4198 next_induction = induction->next_iv;
4199 free (induction);
4202 free (iv);
4203 iv = next;
4208 /* Perform strength reduction and induction variable elimination.
4210 Pseudo registers created during this function will be beyond the
4211 last valid index in several tables including
4212 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4213 problem here, because the added registers cannot be givs outside of
4214 their loop, and hence will never be reconsidered. But scan_loop
4215 must check regnos to make sure they are in bounds. */
4217 static void
4218 strength_reduce (loop, flags)
4219 struct loop *loop;
4220 int flags;
4222 struct loop_info *loop_info = LOOP_INFO (loop);
4223 struct loop_regs *regs = LOOP_REGS (loop);
4224 struct loop_ivs *ivs = LOOP_IVS (loop);
4225 rtx p;
4226 /* Temporary list pointer for traversing ivs->list. */
4227 struct iv_class *bl;
4228 /* Ratio of extra register life span we can justify
4229 for saving an instruction. More if loop doesn't call subroutines
4230 since in that case saving an insn makes more difference
4231 and more registers are available. */
4232 /* ??? could set this to last value of threshold in move_movables */
4233 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4234 /* Map of pseudo-register replacements. */
4235 rtx *reg_map = NULL;
4236 int reg_map_size;
4237 int unrolled_insn_copies = 0;
4238 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4239 int insn_count = count_insns_in_loop (loop);
4241 addr_placeholder = gen_reg_rtx (Pmode);
4243 ivs->n_regs = max_reg_before_loop;
4244 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4246 /* Find all BIVs in loop. */
4247 loop_bivs_find (loop);
4249 /* Exit if there are no bivs. */
4250 if (! ivs->list)
4252 /* We can still unroll the loop anyway, but indicate that there is no
4253 strength reduction info available. */
4254 if (flags & LOOP_UNROLL)
4255 unroll_loop (loop, insn_count, 0);
4257 loop_ivs_free (loop);
4258 return;
4261 /* Determine how BIVs are initialised by looking through the pre-header
4262 extended basic block. */
4263 loop_bivs_init_find (loop);
4265 /* Look at each biv and see if we can say anything better about its
4266 initial value from any initializing insns set up above. */
4267 loop_bivs_check (loop);
4269 /* Search the loop for general induction variables. */
4270 loop_givs_find (loop);
4272 /* Try to calculate and save the number of loop iterations. This is
4273 set to zero if the actual number can not be calculated. This must
4274 be called after all giv's have been identified, since otherwise it may
4275 fail if the iteration variable is a giv. */
4276 loop_iterations (loop);
4278 /* Now for each giv for which we still don't know whether or not it is
4279 replaceable, check to see if it is replaceable because its final value
4280 can be calculated. This must be done after loop_iterations is called,
4281 so that final_giv_value will work correctly. */
4282 loop_givs_check (loop);
4284 /* Try to prove that the loop counter variable (if any) is always
4285 nonnegative; if so, record that fact with a REG_NONNEG note
4286 so that "decrement and branch until zero" insn can be used. */
4287 check_dbra_loop (loop, insn_count);
4289 /* Create reg_map to hold substitutions for replaceable giv regs.
4290 Some givs might have been made from biv increments, so look at
4291 ivs->reg_iv_type for a suitable size. */
4292 reg_map_size = ivs->n_regs;
4293 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4295 /* Examine each iv class for feasibility of strength reduction/induction
4296 variable elimination. */
4298 for (bl = ivs->list; bl; bl = bl->next)
4300 struct induction *v;
4301 int benefit;
4303 /* Test whether it will be possible to eliminate this biv
4304 provided all givs are reduced. */
4305 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4307 /* Check each extension dependent giv in this class to see if its
4308 root biv is safe from wrapping in the interior mode. */
4309 check_ext_dependant_givs (bl, loop_info);
4311 /* Combine all giv's for this iv_class. */
4312 combine_givs (regs, bl);
4314 /* This will be true at the end, if all givs which depend on this
4315 biv have been strength reduced.
4316 We can't (currently) eliminate the biv unless this is so. */
4317 bl->all_reduced = 1;
4319 for (v = bl->giv; v; v = v->next_iv)
4321 struct induction *tv;
4323 if (v->ignore || v->same)
4324 continue;
4326 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
4328 /* If an insn is not to be strength reduced, then set its ignore
4329 flag, and clear bl->all_reduced. */
4331 /* A giv that depends on a reversed biv must be reduced if it is
4332 used after the loop exit; otherwise, it would have the wrong
4333 value after the loop exit. To make it simple, just reduce all
4334 such givs whether or not we know they are used after the loop
4335 exit. */
4337 if (! flag_reduce_all_givs
4338 && v->lifetime * threshold * benefit < insn_count
4339 && ! bl->reversed)
4341 if (loop_dump_stream)
4342 fprintf (loop_dump_stream,
4343 "giv of insn %d not worth while, %d vs %d.\n",
4344 INSN_UID (v->insn),
4345 v->lifetime * threshold * benefit, insn_count);
4346 v->ignore = 1;
4347 bl->all_reduced = 0;
4349 else
4351 /* Check that we can increment the reduced giv without a
4352 multiply insn. If not, reject it. */
4354 for (tv = bl->biv; tv; tv = tv->next_iv)
4355 if (tv->mult_val == const1_rtx
4356 && ! product_cheap_p (tv->add_val, v->mult_val))
4358 if (loop_dump_stream)
4359 fprintf (loop_dump_stream,
4360 "giv of insn %d: would need a multiply.\n",
4361 INSN_UID (v->insn));
4362 v->ignore = 1;
4363 bl->all_reduced = 0;
4364 break;
4369 /* Check for givs whose first use is their definition and whose
4370 last use is the definition of another giv. If so, it is likely
4371 dead and should not be used to derive another giv nor to
4372 eliminate a biv. */
4373 loop_givs_dead_check (loop, bl);
4375 /* Reduce each giv that we decided to reduce. */
4376 loop_givs_reduce (loop, bl);
4378 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4379 as not reduced.
4381 For each giv register that can be reduced now: if replaceable,
4382 substitute reduced reg wherever the old giv occurs;
4383 else add new move insn "giv_reg = reduced_reg". */
4384 loop_givs_rescan (loop, bl, reg_map);
4386 /* All the givs based on the biv bl have been reduced if they
4387 merit it. */
4389 /* For each giv not marked as maybe dead that has been combined with a
4390 second giv, clear any "maybe dead" mark on that second giv.
4391 v->new_reg will either be or refer to the register of the giv it
4392 combined with.
4394 Doing this clearing avoids problems in biv elimination where
4395 a giv's new_reg is a complex value that can't be put in the
4396 insn but the giv combined with (with a reg as new_reg) is
4397 marked maybe_dead. Since the register will be used in either
4398 case, we'd prefer it be used from the simpler giv. */
4400 for (v = bl->giv; v; v = v->next_iv)
4401 if (! v->maybe_dead && v->same)
4402 v->same->maybe_dead = 0;
4404 /* Try to eliminate the biv, if it is a candidate.
4405 This won't work if ! bl->all_reduced,
4406 since the givs we planned to use might not have been reduced.
4408 We have to be careful that we didn't initially think we could
4409 eliminate this biv because of a giv that we now think may be
4410 dead and shouldn't be used as a biv replacement.
4412 Also, there is the possibility that we may have a giv that looks
4413 like it can be used to eliminate a biv, but the resulting insn
4414 isn't valid. This can happen, for example, on the 88k, where a
4415 JUMP_INSN can compare a register only with zero. Attempts to
4416 replace it with a compare with a constant will fail.
4418 Note that in cases where this call fails, we may have replaced some
4419 of the occurrences of the biv with a giv, but no harm was done in
4420 doing so in the rare cases where it can occur. */
4422 if (bl->all_reduced == 1 && bl->eliminable
4423 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4425 /* ?? If we created a new test to bypass the loop entirely,
4426 or otherwise drop straight in, based on this test, then
4427 we might want to rewrite it also. This way some later
4428 pass has more hope of removing the initialization of this
4429 biv entirely. */
4431 /* If final_value != 0, then the biv may be used after loop end
4432 and we must emit an insn to set it just in case.
4434 Reversed bivs already have an insn after the loop setting their
4435 value, so we don't need another one. We can't calculate the
4436 proper final value for such a biv here anyway. */
4437 if (bl->final_value && ! bl->reversed)
4438 loop_insn_sink_or_swim (loop, gen_move_insn
4439 (bl->biv->dest_reg, bl->final_value));
4441 if (loop_dump_stream)
4442 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4443 bl->regno);
4447 /* Go through all the instructions in the loop, making all the
4448 register substitutions scheduled in REG_MAP. */
4450 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
4451 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4452 || GET_CODE (p) == CALL_INSN)
4454 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4455 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4456 INSN_CODE (p) = -1;
4459 if (loop_info->n_iterations > 0)
4461 /* When we completely unroll a loop we will likely not need the increment
4462 of the loop BIV and we will not need the conditional branch at the
4463 end of the loop. */
4464 unrolled_insn_copies = insn_count - 2;
4466 #ifdef HAVE_cc0
4467 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4468 need the comparison before the conditional branch at the end of the
4469 loop. */
4470 unrolled_insn_copies -= 1;
4471 #endif
4473 /* We'll need one copy for each loop iteration. */
4474 unrolled_insn_copies *= loop_info->n_iterations;
4476 /* A little slop to account for the ability to remove initialization
4477 code, better CSE, and other secondary benefits of completely
4478 unrolling some loops. */
4479 unrolled_insn_copies -= 1;
4481 /* Clamp the value. */
4482 if (unrolled_insn_copies < 0)
4483 unrolled_insn_copies = 0;
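/* Worked example (illustrative only): for insn_count == 10 and
   n_iterations == 4 on a HAVE_cc0 machine, we get (10 - 2 - 1) * 4
   - 1 == 27 copies; since 27 > 10, such a loop would not be unrolled
   on size grounds alone by the test below.  */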
4486 /* Unroll loops from within strength reduction so that we can use the
4487 induction variable information that strength_reduce has already
4488 collected. Always unroll loops that would be as small or smaller
4489 unrolled than when rolled. */
4490 if ((flags & LOOP_UNROLL)
4491 || (loop_info->n_iterations > 0
4492 && unrolled_insn_copies <= insn_count))
4493 unroll_loop (loop, insn_count, 1);
4495 #ifdef HAVE_doloop_end
4496 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4497 doloop_optimize (loop);
4498 #endif /* HAVE_doloop_end */
4500 if (loop_dump_stream)
4501 fprintf (loop_dump_stream, "\n");
4503 loop_ivs_free (loop);
4504 if (reg_map)
4505 free (reg_map);
4508 /* Record all basic induction variables calculated in the insn. */
4509 static rtx
4510 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4511 struct loop *loop;
4512 rtx p;
4513 int not_every_iteration;
4514 int maybe_multiple;
4516 struct loop_ivs *ivs = LOOP_IVS (loop);
4517 rtx set;
4518 rtx dest_reg;
4519 rtx inc_val;
4520 rtx mult_val;
4521 rtx *location;
4523 if (GET_CODE (p) == INSN
4524 && (set = single_set (p))
4525 && GET_CODE (SET_DEST (set)) == REG)
4527 dest_reg = SET_DEST (set);
4528 if (REGNO (dest_reg) < max_reg_before_loop
4529 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
4530 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
4532 if (basic_induction_var (loop, SET_SRC (set),
4533 GET_MODE (SET_SRC (set)),
4534 dest_reg, p, &inc_val, &mult_val,
4535 &location))
4537 /* It is a possible basic induction variable.
4538 Create and initialize an induction structure for it. */
4540 struct induction *v
4541 = (struct induction *) xmalloc (sizeof (struct induction));
4543 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
4544 not_every_iteration, maybe_multiple);
4545 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
4547 else if (REGNO (dest_reg) < ivs->n_regs)
4548 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
4551 return p;
4554 /* Record all givs calculated in the insn.
4555 A register is a giv if: it is only set once, it is a function of a
4556 biv and a constant (or invariant), and it is not a biv. */
4557 static rtx
4558 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
4559 struct loop *loop;
4560 rtx p;
4561 int not_every_iteration;
4562 int maybe_multiple;
4564 struct loop_regs *regs = LOOP_REGS (loop);
4566 rtx set;
4567 /* Look for a general induction variable in a register. */
4568 if (GET_CODE (p) == INSN
4569 && (set = single_set (p))
4570 && GET_CODE (SET_DEST (set)) == REG
4571 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
4573 rtx src_reg;
4574 rtx dest_reg;
4575 rtx add_val;
4576 rtx mult_val;
4577 rtx ext_val;
4578 int benefit;
4579 rtx regnote = 0;
4580 rtx last_consec_insn;
4582 dest_reg = SET_DEST (set);
4583 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4584 return p;
4586 if (/* SET_SRC is a giv. */
4587 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
4588 &mult_val, &ext_val, 0, &benefit, VOIDmode)
4589 /* Equivalent expression is a giv. */
4590 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4591 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
4592 &add_val, &mult_val, &ext_val, 0,
4593 &benefit, VOIDmode)))
4594 /* Don't try to handle any regs made by loop optimization.
4595 We have nothing on them in regno_first_uid, etc. */
4596 && REGNO (dest_reg) < max_reg_before_loop
4597 /* Don't recognize a BASIC_INDUCT_VAR here. */
4598 && dest_reg != src_reg
4599 /* This must be the only place where the register is set. */
4600 && (regs->array[REGNO (dest_reg)].n_times_set == 1
4601 /* or all sets must be consecutive and make a giv. */
4602 || (benefit = consec_sets_giv (loop, benefit, p,
4603 src_reg, dest_reg,
4604 &add_val, &mult_val, &ext_val,
4605 &last_consec_insn))))
4607 struct induction *v
4608 = (struct induction *) xmalloc (sizeof (struct induction));
4610 /* If this is a library call, increase benefit. */
4611 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4612 benefit += libcall_benefit (p);
4614 /* Skip the consecutive insns, if there are any. */
4615 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
4616 p = last_consec_insn;
4618 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
4619 ext_val, benefit, DEST_REG, not_every_iteration,
4620 maybe_multiple, NULL_PTR);
4625 #ifndef DONT_REDUCE_ADDR
4626 /* Look for givs which are memory addresses. */
4627 /* This resulted in worse code on a VAX 8600. I wonder if it
4628 still does. */
4629 if (GET_CODE (p) == INSN)
4630 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
4631 maybe_multiple);
4632 #endif
4634 /* Update the status of whether giv can derive other givs. This can
4635 change when we pass a label or an insn that updates a biv. */
4636 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4637 || GET_CODE (p) == CODE_LABEL)
4638 update_giv_derive (loop, p);
4639 return p;
4642 /* Return 1 if X is a valid source for an initial value (or as value being
4643 compared against in an initial test).
4645 X must be either a register or constant and must not be clobbered between
4646 the current insn and the start of the loop.
4648 INSN is the insn containing X. */
4650 static int
4651 valid_initial_value_p (x, insn, call_seen, loop_start)
4652 rtx x;
4653 rtx insn;
4654 int call_seen;
4655 rtx loop_start;
4657 if (CONSTANT_P (x))
4658 return 1;
4660 /* Only consider pseudos we know about initialized in insns whose luids
4661 we know. */
4662 if (GET_CODE (x) != REG
4663 || REGNO (x) >= max_reg_before_loop)
4664 return 0;
4666 /* Don't use a call-clobbered register across a call which clobbers it. On
4667 some machines, don't use any hard registers at all. */
4668 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4669 && (SMALL_REGISTER_CLASSES
4670 || (call_used_regs[REGNO (x)] && call_seen)))
4671 return 0;
4673 /* Don't use registers that have been clobbered before the start of the
4674 loop. */
4675 if (reg_set_between_p (x, insn, loop_start))
4676 return 0;
4678 return 1;
4681 /* Scan X for memory refs and check each memory address
4682 as a possible giv. INSN is the insn whose pattern X comes from.
4683 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4684 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
4685 more than once in each loop iteration. */
4687 static void
4688 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
4689 const struct loop *loop;
4690 rtx x;
4691 rtx insn;
4692 int not_every_iteration, maybe_multiple;
4694 register int i, j;
4695 register enum rtx_code code;
4696 register const char *fmt;
4698 if (x == 0)
4699 return;
4701 code = GET_CODE (x);
4702 switch (code)
4704 case REG:
4705 case CONST_INT:
4706 case CONST:
4707 case CONST_DOUBLE:
4708 case SYMBOL_REF:
4709 case LABEL_REF:
4710 case PC:
4711 case CC0:
4712 case ADDR_VEC:
4713 case ADDR_DIFF_VEC:
4714 case USE:
4715 case CLOBBER:
4716 return;
4718 case MEM:
4720 rtx src_reg;
4721 rtx add_val;
4722 rtx mult_val;
4723 rtx ext_val;
4724 int benefit;
4726 /* This code used to disable creating GIVs with mult_val == 1 and
4727 add_val == 0. However, this leads to lost optimizations when
4728 it comes time to combine a set of related DEST_ADDR GIVs, since
4729 this one would not be seen. */
4731 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
4732 &mult_val, &ext_val, 1, &benefit,
4733 GET_MODE (x)))
4735 /* Found one; record it. */
4736 struct induction *v
4737 = (struct induction *) xmalloc (sizeof (struct induction));
4739 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
4740 add_val, ext_val, benefit, DEST_ADDR,
4741 not_every_iteration, maybe_multiple, &XEXP (x, 0));
4743 v->mem = x;
4746 return;
4748 default:
4749 break;
4752 /* Recursively scan the subexpressions for other mem refs. */
4754 fmt = GET_RTX_FORMAT (code);
4755 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4756 if (fmt[i] == 'e')
4757 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
4758 maybe_multiple);
4759 else if (fmt[i] == 'E')
4760 for (j = 0; j < XVECLEN (x, i); j++)
4761 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
4762 maybe_multiple);
4765 /* Fill in the data about one biv update.
4766 V is the `struct induction' in which we record the biv. (It is
4767 allocated by the caller, with xmalloc.)
4768 INSN is the insn that sets it.
4769 DEST_REG is the biv's reg.
4771 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4772 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4773 being set to INC_VAL.
4775 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4776 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4777 can be executed more than once per iteration. If MAYBE_MULTIPLE
4778 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4779 executed exactly once per iteration. */
4781 static void
4782 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
4783 not_every_iteration, maybe_multiple)
4784 struct loop *loop;
4785 struct induction *v;
4786 rtx insn;
4787 rtx dest_reg;
4788 rtx inc_val;
4789 rtx mult_val;
4790 rtx *location;
4791 int not_every_iteration;
4792 int maybe_multiple;
4794 struct loop_ivs *ivs = LOOP_IVS (loop);
4795 struct iv_class *bl;
4797 v->insn = insn;
4798 v->src_reg = dest_reg;
4799 v->dest_reg = dest_reg;
4800 v->mult_val = mult_val;
4801 v->add_val = inc_val;
4802 v->ext_dependant = NULL_RTX;
4803 v->location = location;
4804 v->mode = GET_MODE (dest_reg);
4805 v->always_computable = ! not_every_iteration;
4806 v->always_executed = ! not_every_iteration;
4807 v->maybe_multiple = maybe_multiple;
4809 /* Add this to the reg's iv_class, creating a class
4810 if this is the first incrementation of the reg. */
4812 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
4813 if (bl == 0)
4815 /* Create and initialize new iv_class. */
4817 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
4819 bl->regno = REGNO (dest_reg);
4820 bl->biv = 0;
4821 bl->giv = 0;
4822 bl->biv_count = 0;
4823 bl->giv_count = 0;
4825 /* Set initial value to the reg itself. */
4826 bl->initial_value = dest_reg;
4827 bl->final_value = 0;
4828 /* We haven't seen the initializing insn yet. */
4829 bl->init_insn = 0;
4830 bl->init_set = 0;
4831 bl->initial_test = 0;
4832 bl->incremented = 0;
4833 bl->eliminable = 0;
4834 bl->nonneg = 0;
4835 bl->reversed = 0;
4836 bl->total_benefit = 0;
4838 /* Add this class to ivs->list. */
4839 bl->next = ivs->list;
4840 ivs->list = bl;
4842 /* Put it in the array of biv register classes. */
4843 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
4846 /* Update IV_CLASS entry for this biv. */
4847 v->next_iv = bl->biv;
4848 bl->biv = v;
4849 bl->biv_count++;
4850 if (mult_val == const1_rtx)
4851 bl->incremented = 1;
4853 if (loop_dump_stream)
4854 loop_biv_dump (v, loop_dump_stream, 0);
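/* Illustration only: an update `i += 3' is recorded with mult_val ==
   const1_rtx and add_val == 3, and marks the class as incremented;
   a plain assignment of a loop invariant such as `i = 9' is recorded
   with mult_val == const0_rtx and add_val == 9.  */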
4857 /* Fill in the data about one giv.
4858 V is the `struct induction' in which we record the giv. (It is
4859 allocated by the caller, with xmalloc.)
4860 INSN is the insn that sets it.
4861 BENEFIT estimates the savings from deleting this insn.
4862 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4863 into a register or is used as a memory address.
4865 SRC_REG is the biv reg which the giv is computed from.
4866 DEST_REG is the giv's reg (if the giv is stored in a reg).
4867 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4868 LOCATION points to the place where this giv's value appears in INSN. */
4870 static void
4871 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
4872 benefit, type, not_every_iteration, maybe_multiple, location)
4873 const struct loop *loop;
4874 struct induction *v;
4875 rtx insn;
4876 rtx src_reg;
4877 rtx dest_reg;
4878 rtx mult_val, add_val, ext_val;
4879 int benefit;
4880 enum g_types type;
4881 int not_every_iteration, maybe_multiple;
4882 rtx *location;
4884 struct loop_ivs *ivs = LOOP_IVS (loop);
4885 struct induction *b;
4886 struct iv_class *bl;
4887 rtx set = single_set (insn);
4888 rtx temp;
4890 /* Attempt to prove constantness of the values. */
4891 temp = simplify_rtx (add_val);
4892 if (temp)
4893 add_val = temp;
4895 v->insn = insn;
4896 v->src_reg = src_reg;
4897 v->giv_type = type;
4898 v->dest_reg = dest_reg;
4899 v->mult_val = mult_val;
4900 v->add_val = add_val;
4901 v->ext_dependant = ext_val;
4902 v->benefit = benefit;
4903 v->location = location;
4904 v->cant_derive = 0;
4905 v->combined_with = 0;
4906 v->maybe_multiple = maybe_multiple;
4907 v->maybe_dead = 0;
4908 v->derive_adjustment = 0;
4909 v->same = 0;
4910 v->ignore = 0;
4911 v->new_reg = 0;
4912 v->final_value = 0;
4913 v->same_insn = 0;
4914 v->auto_inc_opt = 0;
4915 v->unrolled = 0;
4916 v->shared = 0;
4918 /* The v->always_computable field is used in update_giv_derive, to
4919 determine whether a giv can be used to derive another giv. For a
4920 DEST_REG giv, INSN computes a new value for the giv, so its value
4921 isn't computable if INSN isn't executed every iteration.
4922 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4923 it does not compute a new value. Hence the value is always computable
4924 regardless of whether INSN is executed each iteration. */
4926 if (type == DEST_ADDR)
4927 v->always_computable = 1;
4928 else
4929 v->always_computable = ! not_every_iteration;
4931 v->always_executed = ! not_every_iteration;
4933 if (type == DEST_ADDR)
4935 v->mode = GET_MODE (*location);
4936 v->lifetime = 1;
4938 else /* type == DEST_REG */
4940 v->mode = GET_MODE (SET_DEST (set));
4942 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
4944 /* If the lifetime is zero, it means that this register is
4945 really a dead store. So mark this as a giv that can be
4946 ignored. This will not prevent the biv from being eliminated. */
4947 if (v->lifetime == 0)
4948 v->ignore = 1;
4950 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
4951 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
4954 /* Add the giv to the class of givs computed from one biv. */
4956 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
4957 if (bl)
4959 v->next_iv = bl->giv;
4960 bl->giv = v;
4961 /* Don't count DEST_ADDR. This is supposed to count the number of
4962 insns that calculate givs. */
4963 if (type == DEST_REG)
4964 bl->giv_count++;
4965 bl->total_benefit += benefit;
4967 else
4968 /* Fatal error, biv missing for this giv? */
4969 abort ();
4971 if (type == DEST_ADDR)
4972 v->replaceable = 1;
4973 else
4975 /* The giv can be replaced outright by the reduced register only if all
4976 of the following conditions are true:
4977 - the insn that sets the giv is always executed on any iteration
4978 on which the giv is used at all
4979 (there are two ways to deduce this:
4980 either the insn is executed on every iteration,
4981 or all uses follow that insn in the same basic block),
4982 - the giv is not used outside the loop
4983 - no assignments to the biv occur during the giv's lifetime. */
4985 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
4986 /* Previous line always fails if INSN was moved by loop opt. */
4987 && REGNO_LAST_LUID (REGNO (dest_reg))
4988 < INSN_LUID (loop->end)
4989 && (! not_every_iteration
4990 || last_use_this_basic_block (dest_reg, insn)))
4992 /* Now check that there are no assignments to the biv within the
4993 giv's lifetime. This requires two separate checks. */
4995 /* Check each biv update, and fail if any are between the first
4996 and last use of the giv.
4998 If this loop contains an inner loop that was unrolled, then
4999 the insn modifying the biv may have been emitted by the loop
5000 unrolling code, and hence does not have a valid luid. Just
5001 mark the biv as not replaceable in this case. It is not very
5002 useful as a biv, because it is used in two different loops.
5003 It is very unlikely that we would be able to optimize the giv
5004 using this biv anyway. */
5006 v->replaceable = 1;
5007 for (b = bl->biv; b; b = b->next_iv)
5009 if (INSN_UID (b->insn) >= max_uid_for_loop
5010 || ((INSN_LUID (b->insn)
5011 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5012 && (INSN_LUID (b->insn)
5013 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5015 v->replaceable = 0;
5016 v->not_replaceable = 1;
5017 break;
5021 /* If there are any backwards branches that go from after the
5022 biv update to before it, then this giv is not replaceable. */
5023 if (v->replaceable)
5024 for (b = bl->biv; b; b = b->next_iv)
5025 if (back_branch_in_range_p (loop, b->insn))
5027 v->replaceable = 0;
5028 v->not_replaceable = 1;
5029 break;
5032 else
5034 /* May still be replaceable, we don't have enough info here to
5035 decide. */
5036 v->replaceable = 0;
5037 v->not_replaceable = 0;
5041 /* Record whether the add_val contains a const_int, for later use by
5042 combine_givs. */
5044 rtx tem = add_val;
5046 v->no_const_addval = 1;
5047 if (tem == const0_rtx)
5048 ;
5049 else if (CONSTANT_P (add_val))
5050 v->no_const_addval = 0;
5051 if (GET_CODE (tem) == PLUS)
5053 while (1)
5055 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5056 tem = XEXP (tem, 0);
5057 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5058 tem = XEXP (tem, 1);
5059 else
5060 break;
5062 if (CONSTANT_P (XEXP (tem, 1)))
5063 v->no_const_addval = 0;
5067 if (loop_dump_stream)
5068 loop_giv_dump (v, loop_dump_stream, 0);
5071 /* All this does is determine whether a giv can be made replaceable because
5072 its final value can be calculated. This code can not be part of record_giv
5073 above, because final_giv_value requires that the number of loop iterations
5074 be known, and that can not be accurately calculated until after all givs
5075 have been identified. */
5077 static void
5078 check_final_value (loop, v)
5079 const struct loop *loop;
5080 struct induction *v;
5082 struct loop_ivs *ivs = LOOP_IVS (loop);
5083 struct iv_class *bl;
5084 rtx final_value = 0;
5086 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5088 /* DEST_ADDR givs will never reach here, because they are always marked
5089 replaceable above in record_giv. */
5091 /* The giv can be replaced outright by the reduced register only if all
5092 of the following conditions are true:
5093 - the insn that sets the giv is always executed on any iteration
5094 on which the giv is used at all
5095 (there are two ways to deduce this:
5096 either the insn is executed on every iteration,
5097 or all uses follow that insn in the same basic block),
5098 - its final value can be calculated (this condition is different
5099 from the one above in record_giv)
5100 - it's not used before it's set
5101 - no assignments to the biv occur during the giv's lifetime. */
5103 #if 0
5104 /* This is only called now when replaceable is known to be false. */
5105 /* Clear replaceable, so that it won't confuse final_giv_value. */
5106 v->replaceable = 0;
5107 #endif
5109 if ((final_value = final_giv_value (loop, v))
5110 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5112 int biv_increment_seen = 0, before_giv_insn = 0;
5113 rtx p = v->insn;
5114 rtx last_giv_use;
5116 v->replaceable = 1;
5118 /* When trying to determine whether or not a biv increment occurs
5119 during the lifetime of the giv, we can ignore uses of the variable
5120 outside the loop because final_value is true. Hence we can not
5121 use regno_last_uid and regno_first_uid as above in record_giv. */
5123 /* Search the loop to determine whether any assignments to the
5124 biv occur during the giv's lifetime. Start with the insn
5125 that sets the giv, and search around the loop until we come
5126 back to that insn again.
5128 Also fail if there is a jump within the giv's lifetime that jumps
5129 to somewhere outside the lifetime but still within the loop. This
5130 catches spaghetti code where the execution order is not linear, and
5131 hence the above test fails. Here we assume that the giv lifetime
5132 does not extend from one iteration of the loop to the next, so as
5133 to make the test easier. Since the lifetime isn't known yet,
5134 this requires two loops. See also record_giv above. */
5136 last_giv_use = v->insn;
5138 while (1)
5140 p = NEXT_INSN (p);
5141 if (p == loop->end)
5143 before_giv_insn = 1;
5144 p = NEXT_INSN (loop->start);
5146 if (p == v->insn)
5147 break;
5149 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5150 || GET_CODE (p) == CALL_INSN)
5152 /* It is possible for the BIV increment to use the GIV if we
5153 have a cycle. Thus we must be sure to check each insn for
5154 both BIV and GIV uses, and we must check for BIV uses
5155 first. */
5157 if (! biv_increment_seen
5158 && reg_set_p (v->src_reg, PATTERN (p)))
5159 biv_increment_seen = 1;
5161 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5163 if (biv_increment_seen || before_giv_insn)
5165 v->replaceable = 0;
5166 v->not_replaceable = 1;
5167 break;
5169 last_giv_use = p;
5174 /* Now that the lifetime of the giv is known, check for branches
5175 from within the lifetime to outside the lifetime if it is still
5176 replaceable. */
5178 if (v->replaceable)
5180 p = v->insn;
5181 while (1)
5183 p = NEXT_INSN (p);
5184 if (p == loop->end)
5185 p = NEXT_INSN (loop->start);
5186 if (p == last_giv_use)
5187 break;
5189 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5190 && LABEL_NAME (JUMP_LABEL (p))
5191 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5192 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5193 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5194 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5196 v->replaceable = 0;
5197 v->not_replaceable = 1;
5199 if (loop_dump_stream)
5200 fprintf (loop_dump_stream,
5201 "Found branch outside giv lifetime.\n");
5203 break;
5208 /* If it is replaceable, then save the final value. */
5209 if (v->replaceable)
5210 v->final_value = final_value;
5213 if (loop_dump_stream && v->replaceable)
5214 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5215 INSN_UID (v->insn), REGNO (v->dest_reg));
5218 /* Update the status of whether a giv can derive other givs.
5220 We need to do something special if there is or may be an update to the biv
5221 between the time the giv is defined and the time it is used to derive
5222 another giv.
5224 In addition, a giv that is only conditionally set is not allowed to
5225 derive another giv once a label has been passed.
5227 The cases we look at are when a label or an update to a biv is passed. */
5229 static void
5230 update_giv_derive (loop, p)
5231 const struct loop *loop;
5232 rtx p;
5234 struct loop_ivs *ivs = LOOP_IVS (loop);
5235 struct iv_class *bl;
5236 struct induction *biv, *giv;
5237 rtx tem;
5238 int dummy;
5240 /* Search all IV classes, then all bivs, and finally all givs.
5242 There are three cases we are concerned with. First we have the situation
5243 of a giv that is only updated conditionally. In that case, it may not
5244 derive any givs after a label is passed.
5246 The second case is when a biv update occurs, or may occur, after the
5247 definition of a giv. For certain biv updates (see below) that are
5248 known to occur between the giv definition and use, we can adjust the
5249 giv definition. For others, or when the biv update is conditional,
5250 we must prevent the giv from deriving any other givs. There are two
5251 sub-cases within this case.
5253 If this is a label, we are concerned with any biv update that is done
5254 conditionally, since it may be done after the giv is defined followed by
5255 a branch here (actually, we need to pass both a jump and a label, but
5256 this extra tracking doesn't seem worth it).
5258 If this is a jump, we are concerned about any biv update that may be
5259 executed multiple times. We are actually only concerned about
5260 backward jumps, but it is probably not worth performing the test
5261 on the jump again here.
5263 If this is a biv update, we must adjust the giv status to show that a
5264 subsequent biv update was performed. If this adjustment cannot be done,
5265 the giv cannot derive further givs. */
5267 for (bl = ivs->list; bl; bl = bl->next)
5268 for (biv = bl->biv; biv; biv = biv->next_iv)
5269 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5270 || biv->insn == p)
5272 for (giv = bl->giv; giv; giv = giv->next_iv)
5274 /* If cant_derive is already true, there is no point in
5275 checking all of these conditions again. */
5276 if (giv->cant_derive)
5277 continue;
5279 /* If this giv is conditionally set and we have passed a label,
5280 it cannot derive anything. */
5281 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5282 giv->cant_derive = 1;
5284 /* Skip givs that have mult_val == 0, since
5285 they are really invariants. Also skip those that are
5286 replaceable, since we know their lifetime doesn't contain
5287 any biv update. */
5288 else if (giv->mult_val == const0_rtx || giv->replaceable)
5289 continue;
5291 /* The only way we can allow this giv to derive another
5292 is if this is a biv increment and we can form the product
5293 of biv->add_val and giv->mult_val. In this case, we will
5294 be able to compute a compensation. */
5295 else if (biv->insn == p)
5297 rtx ext_val_dummy;
5299 tem = 0;
5300 if (biv->mult_val == const1_rtx)
5301 tem = simplify_giv_expr (loop,
5302 gen_rtx_MULT (giv->mode,
5303 biv->add_val,
5304 giv->mult_val),
5305 &ext_val_dummy, &dummy);
5307 if (tem && giv->derive_adjustment)
5308 tem = simplify_giv_expr
5309 (loop,
5310 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5311 &ext_val_dummy, &dummy);
5313 if (tem)
5314 giv->derive_adjustment = tem;
5315 else
5316 giv->cant_derive = 1;
5318 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5319 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5320 giv->cant_derive = 1;
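/* Worked example (illustrative only): if giv g == 3*i + 7 is set and
   the biv update `i += 2' (biv->add_val == 2, biv->mult_val ==
   const1_rtx) is then passed, the product formed above simplifies to
   2*3 == 6, so g's derive_adjustment becomes 6: any giv later derived
   from g must be compensated by 6 because it really depends on the
   updated `i'.  */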
5325 /* Check whether an insn is an increment legitimate for a basic induction var.
5326 X is the source of insn P, or a part of it.
5327 MODE is the mode in which X should be interpreted.
5329 DEST_REG is the putative biv, also the destination of the insn.
5330 We accept patterns of these forms:
5331 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5332 REG = INVARIANT + REG
5334 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5335 store the additive term into *INC_VAL, and store the place where
5336 we found the additive term into *LOCATION.
5338 If X is an assignment of an invariant into DEST_REG, we set
5339 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5341 We also want to detect a BIV when it corresponds to a variable
5342 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5343 of the variable may be a PLUS that adds a SUBREG of that variable to
5344 an invariant and then sign- or zero-extends the result of the PLUS
5345 into the variable.
5347 Most GIVs in such cases will be in the promoted mode, since that is
5348 probably the natural computation mode (and almost certainly the mode
5349 used for addresses) on the machine. So we view the pseudo-reg containing
5350 the variable as the BIV, as if it were simply incremented.
5352 Note that treating the entire pseudo as a BIV will result in making
5353 simple increments to any GIVs based on it. However, if the variable
5354 overflows in its declared mode but not its promoted mode, the result will
5355 be incorrect. This is acceptable if the variable is signed, since
5356 overflows in such cases are undefined, but not if it is unsigned, since
5357 those overflows are defined. So we only check for SIGN_EXTEND and
5358 not ZERO_EXTEND.
5360 If we cannot find a biv, we return 0. */
5362 static int
5363 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5364 const struct loop *loop;
5365 register rtx x;
5366 enum machine_mode mode;
5367 rtx dest_reg;
5368 rtx p;
5369 rtx *inc_val;
5370 rtx *mult_val;
5371 rtx **location;
5373 register enum rtx_code code;
5374 rtx *argp, arg;
5375 rtx insn, set = 0;
5377 code = GET_CODE (x);
5378 *location = NULL;
5379 switch (code)
5381 case PLUS:
5382 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5383 || (GET_CODE (XEXP (x, 0)) == SUBREG
5384 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5385 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5387 argp = &XEXP (x, 1);
5389 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5390 || (GET_CODE (XEXP (x, 1)) == SUBREG
5391 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5392 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5394 argp = &XEXP (x, 0);
5396 else
5397 return 0;
5399 arg = *argp;
5400 if (loop_invariant_p (loop, arg) != 1)
5401 return 0;
5403 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5404 *mult_val = const1_rtx;
5405 *location = argp;
5406 return 1;
5408 case SUBREG:
5409 /* If this is a SUBREG for a promoted variable, check the inner
5410 value. */
5411 if (SUBREG_PROMOTED_VAR_P (x))
5412 return basic_induction_var (loop, SUBREG_REG (x),
5413 GET_MODE (SUBREG_REG (x)),
5414 dest_reg, p, inc_val, mult_val, location);
5415 return 0;
5417 case REG:
5418 /* If this register is assigned in a previous insn, look at its
5419 source, but don't go outside the loop or past a label. */
5421 /* If this sets a register to itself, we would repeat any previous
5422 biv increment if we applied this strategy blindly. */
5423 if (rtx_equal_p (dest_reg, x))
5424 return 0;
5426 insn = p;
5427 while (1)
5429 rtx dest;
5432 insn = PREV_INSN (insn);
5434 while (insn && GET_CODE (insn) == NOTE
5435 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5437 if (!insn)
5438 break;
5439 set = single_set (insn);
5440 if (set == 0)
5441 break;
5442 dest = SET_DEST (set);
5443 if (dest == x
5444 || (GET_CODE (dest) == SUBREG
5445 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
5446 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
5447 && SUBREG_REG (dest) == x))
5448 return basic_induction_var (loop, SET_SRC (set),
5449 (GET_MODE (SET_SRC (set)) == VOIDmode
5450 ? GET_MODE (x)
5451 : GET_MODE (SET_SRC (set))),
5452 dest_reg, insn,
5453 inc_val, mult_val, location);
5455 while (GET_CODE (dest) == SIGN_EXTRACT
5456 || GET_CODE (dest) == ZERO_EXTRACT
5457 || GET_CODE (dest) == SUBREG
5458 || GET_CODE (dest) == STRICT_LOW_PART)
5459 dest = XEXP (dest, 0);
5460 if (dest == x)
5461 break;
5463 /* Fall through. */
5465 /* Can accept constant setting of biv only when inside innermost loop.
5466 Otherwise, a biv of an inner loop may be incorrectly recognized
5467 as a biv of the outer loop,
5468 causing code to be moved INTO the inner loop. */
5469 case MEM:
5470 if (loop_invariant_p (loop, x) != 1)
5471 return 0;
5472 case CONST_INT:
5473 case SYMBOL_REF:
5474 case CONST:
5475 /* convert_modes aborts if we try to convert to or from CCmode, so just
5476 exclude that case. It is very unlikely that a condition code value
5477 would be a useful iterator anyways. */
5478 if (loop->level == 1
5479 && GET_MODE_CLASS (mode) != MODE_CC
5480 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5482 /* Possible bug here? Perhaps we don't know the mode of X. */
5483 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5484 *mult_val = const0_rtx;
5485 return 1;
5487 else
5488 return 0;
5490 case SIGN_EXTEND:
5491 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5492 dest_reg, p, inc_val, mult_val, location);
5494 case ASHIFTRT:
5495 /* Similar, since this can be a sign extension. */
5496 for (insn = PREV_INSN (p);
5497 (insn && GET_CODE (insn) == NOTE
5498 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5499 insn = PREV_INSN (insn))
5502 if (insn)
5503 set = single_set (insn);
5505 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
5506 && set && SET_DEST (set) == XEXP (x, 0)
5507 && GET_CODE (XEXP (x, 1)) == CONST_INT
5508 && INTVAL (XEXP (x, 1)) >= 0
5509 && GET_CODE (SET_SRC (set)) == ASHIFT
5510 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5511 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
5512 GET_MODE (XEXP (x, 0)),
5513 dest_reg, insn, inc_val, mult_val,
5514 location);
5515 return 0;
5517 default:
5518 return 0;
5522 /* A general induction variable (giv) is any quantity that is a linear
5523 function of a basic induction variable,
5524 i.e. giv = biv * mult_val + add_val.
5525 The coefficients can be any loop invariant quantity.
5526 A giv need not be computed directly from the biv;
5527 it can be computed by way of other givs. */
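/* For example, in

	for (i = 0; i < n; i++)
	  sum += a[i];

   `i' is the biv, and the address of a[i] -- a + 4*i, assuming 4-byte
   elements -- is a giv with mult_val == 4 and add_val == a.  Strength
   reduction can then replace the address computation with a pointer
   that is bumped by 4 on each iteration.  */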
5529 /* Determine whether X computes a giv.
5530 If it does, return a nonzero value
5531 which is the benefit from eliminating the computation of X;
5532 set *SRC_REG to the register of the biv that it is computed from;
5533 set *ADD_VAL and *MULT_VAL to the coefficients,
5534 such that the value of X is biv * mult + add; */
5536 static int
5537 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
5538 is_addr, pbenefit, addr_mode)
5539 const struct loop *loop;
5540 rtx x;
5541 rtx *src_reg;
5542 rtx *add_val;
5543 rtx *mult_val;
5544 rtx *ext_val;
5545 int is_addr;
5546 int *pbenefit;
5547 enum machine_mode addr_mode;
5549 struct loop_ivs *ivs = LOOP_IVS (loop);
5550 rtx orig_x = x;
5552 /* If this is an invariant, forget it, it isn't a giv. */
5553 if (loop_invariant_p (loop, x) == 1)
5554 return 0;
5556 *pbenefit = 0;
5557 *ext_val = NULL_RTX;
5558 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
5559 if (x == 0)
5560 return 0;
5562 switch (GET_CODE (x))
5564 case USE:
5565 case CONST_INT:
5566 /* Since this is now an invariant and wasn't before, it must be a giv
5567 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5568 with. */
5569 *src_reg = ivs->list->biv->dest_reg;
5570 *mult_val = const0_rtx;
5571 *add_val = x;
5572 break;
5574 case REG:
5575 /* This is equivalent to a BIV. */
5576 *src_reg = x;
5577 *mult_val = const1_rtx;
5578 *add_val = const0_rtx;
5579 break;
5581 case PLUS:
5582 /* Either (plus (biv) (invar)) or
5583 (plus (mult (biv) (invar_1)) (invar_2)). */
5584 if (GET_CODE (XEXP (x, 0)) == MULT)
5586 *src_reg = XEXP (XEXP (x, 0), 0);
5587 *mult_val = XEXP (XEXP (x, 0), 1);
5589 else
5591 *src_reg = XEXP (x, 0);
5592 *mult_val = const1_rtx;
5594 *add_val = XEXP (x, 1);
5595 break;
5597 case MULT:
5598 /* ADD_VAL is zero. */
5599 *src_reg = XEXP (x, 0);
5600 *mult_val = XEXP (x, 1);
5601 *add_val = const0_rtx;
5602 break;
5604 default:
5605 abort ();
5608 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5609 one unless they are CONST_INT). */
5610 if (GET_CODE (*add_val) == USE)
5611 *add_val = XEXP (*add_val, 0);
5612 if (GET_CODE (*mult_val) == USE)
5613 *mult_val = XEXP (*mult_val, 0);
5615 if (is_addr)
5616 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
5617 else
5618 *pbenefit += rtx_cost (orig_x, SET);
5620 /* Always return true if this is a giv so it will be detected as such,
5621 even if the benefit is zero or negative. This allows elimination
5622 of bivs that might otherwise not be eliminated. */
5623 return 1;
5626 /* Given an expression, X, try to form it as a linear function of a biv.
5627 We will canonicalize it to be of the form
5628 (plus (mult (BIV) (invar_1))
5629 (invar_2))
5630 with possible degeneracies.
5632 The invariant expressions must each be of a form that can be used as a
5633 machine operand. We surround them with a USE rtx (a hack, but localized
5634 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5635 routine; it is the caller's responsibility to strip them.
5637 If no such canonicalization is possible (i.e., two biv's are used or an
5638 expression that is neither invariant nor a biv or giv), this routine
5639 returns 0.
5641 For a non-zero return, the result will have a code of CONST_INT, USE,
5642 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5644 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
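/* A worked instance: for X == 4 * (i - 1) with biv `i', the MINUS case
   rewrites X as 4 * (i + (-1)), the MULT and PLUS cases distribute and
   fold the constants, and the canonical result is

	(plus (mult (reg i) (const_int 4)) (const_int -4))

   giving the caller mult_val == 4 and add_val == -4.  */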
5646 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
5647 static rtx sge_plus_constant PARAMS ((rtx, rtx));
5649 static rtx
5650 simplify_giv_expr (loop, x, ext_val, benefit)
5651 const struct loop *loop;
5652 rtx x;
5653 rtx *ext_val;
5654 int *benefit;
5656 struct loop_ivs *ivs = LOOP_IVS (loop);
5657 struct loop_regs *regs = LOOP_REGS (loop);
5658 enum machine_mode mode = GET_MODE (x);
5659 rtx arg0, arg1;
5660 rtx tem;
5662 /* If this is not an integer mode, or if we cannot do arithmetic in this
5663 mode, this can't be a giv. */
5664 if (mode != VOIDmode
5665 && (GET_MODE_CLASS (mode) != MODE_INT
5666 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5667 return NULL_RTX;
5669 switch (GET_CODE (x))
5671 case PLUS:
5672 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5673 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5674 if (arg0 == 0 || arg1 == 0)
5675 return NULL_RTX;
5677 /* Put constant last, CONST_INT last if both constant. */
5678 if ((GET_CODE (arg0) == USE
5679 || GET_CODE (arg0) == CONST_INT)
5680 && ! ((GET_CODE (arg0) == USE
5681 && GET_CODE (arg1) == USE)
5682 || GET_CODE (arg1) == CONST_INT))
5683 tem = arg0, arg0 = arg1, arg1 = tem;
5685 /* Handle addition of zero, then addition of an invariant. */
5686 if (arg1 == const0_rtx)
5687 return arg0;
5688 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5689 switch (GET_CODE (arg0))
5691 case CONST_INT:
5692 case USE:
5693 /* Adding two invariants must result in an invariant, so enclose
5694 addition operation inside a USE and return it. */
5695 if (GET_CODE (arg0) == USE)
5696 arg0 = XEXP (arg0, 0);
5697 if (GET_CODE (arg1) == USE)
5698 arg1 = XEXP (arg1, 0);
5700 if (GET_CODE (arg0) == CONST_INT)
5701 tem = arg0, arg0 = arg1, arg1 = tem;
5702 if (GET_CODE (arg1) == CONST_INT)
5703 tem = sge_plus_constant (arg0, arg1);
5704 else
5705 tem = sge_plus (mode, arg0, arg1);
5707 if (GET_CODE (tem) != CONST_INT)
5708 tem = gen_rtx_USE (mode, tem);
5709 return tem;
5711 case REG:
5712 case MULT:
5713 /* biv + invar or mult + invar. Return sum. */
5714 return gen_rtx_PLUS (mode, arg0, arg1);
5716 case PLUS:
5717 /* (a + invar_1) + invar_2. Associate. */
5718 return
5719 simplify_giv_expr (loop,
5720 gen_rtx_PLUS (mode,
5721 XEXP (arg0, 0),
5722 gen_rtx_PLUS (mode,
5723 XEXP (arg0, 1),
5724 arg1)),
5725 ext_val, benefit);
5727 default:
5728 abort ();
5731 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5732 MULT to reduce cases. */
5733 if (GET_CODE (arg0) == REG)
5734 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5735 if (GET_CODE (arg1) == REG)
5736 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5738 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5739 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5740 Recurse to associate the second PLUS. */
5741 if (GET_CODE (arg1) == MULT)
5742 tem = arg0, arg0 = arg1, arg1 = tem;
5744 if (GET_CODE (arg1) == PLUS)
5745 return
5746 simplify_giv_expr (loop,
5747 gen_rtx_PLUS (mode,
5748 gen_rtx_PLUS (mode, arg0,
5749 XEXP (arg1, 0)),
5750 XEXP (arg1, 1)),
5751 ext_val, benefit);
5753 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5754 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5755 return NULL_RTX;
5757 if (! rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5758 return NULL_RTX;
5760 return simplify_giv_expr (loop,
5761 gen_rtx_MULT (mode,
5762 XEXP (arg0, 0),
5763 gen_rtx_PLUS (mode,
5764 XEXP (arg0, 1),
5765 XEXP (arg1, 1))),
5766 ext_val, benefit);
5768 case MINUS:
5769 /* Handle "a - b" as "a + b * (-1)". */
5770 return simplify_giv_expr (loop,
5771 gen_rtx_PLUS (mode,
5772 XEXP (x, 0),
5773 gen_rtx_MULT (mode,
5774 XEXP (x, 1),
5775 constm1_rtx)),
5776 ext_val, benefit);
5778 case MULT:
5779 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5780 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5781 if (arg0 == 0 || arg1 == 0)
5782 return NULL_RTX;
5784 /* Put constant last, CONST_INT last if both constant. */
5785 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5786 && GET_CODE (arg1) != CONST_INT)
5787 tem = arg0, arg0 = arg1, arg1 = tem;
5789 /* If second argument is not now constant, not giv. */
5790 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5791 return NULL_RTX;
5793 /* Handle multiply by 0 or 1. */
5794 if (arg1 == const0_rtx)
5795 return const0_rtx;
5797 else if (arg1 == const1_rtx)
5798 return arg0;
5800 switch (GET_CODE (arg0))
5802 case REG:
5803 /* biv * invar. Done. */
5804 return gen_rtx_MULT (mode, arg0, arg1);
5806 case CONST_INT:
5807 /* Product of two constants. */
5808 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5810 case USE:
5811 /* invar * invar is a giv, but attempt to simplify it somehow. */
5812 if (GET_CODE (arg1) != CONST_INT)
5813 return NULL_RTX;
5815 arg0 = XEXP (arg0, 0);
5816 if (GET_CODE (arg0) == MULT)
5818 /* (invar_0 * invar_1) * invar_2. Associate. */
5819 return simplify_giv_expr (loop,
5820 gen_rtx_MULT (mode,
5821 XEXP (arg0, 0),
5822 gen_rtx_MULT (mode,
5823 XEXP (arg0, 1),
5825 arg1)),
5826 ext_val, benefit);
5828 /* Propagate the MULT expressions to the innermost nodes. */
5829 else if (GET_CODE (arg0) == PLUS)
5831 /* (invar_0 + invar_1) * invar_2. Distribute. */
5832 return simplify_giv_expr (loop,
5833 gen_rtx_PLUS (mode,
5834 gen_rtx_MULT (mode,
5835 XEXP (arg0, 0),
5837 arg1),
5838 gen_rtx_MULT (mode,
5839 XEXP (arg0, 1),
5841 arg1)),
5842 ext_val, benefit);
5844 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
5846 case MULT:
5847 /* (a * invar_1) * invar_2. Associate. */
5848 return simplify_giv_expr (loop,
5849 gen_rtx_MULT (mode,
5850 XEXP (arg0, 0),
5851 gen_rtx_MULT (mode,
5852 XEXP (arg0, 1),
5853 arg1)),
5854 ext_val, benefit);
5856 case PLUS:
5857 /* (a + invar_1) * invar_2. Distribute. */
5858 return simplify_giv_expr (loop,
5859 gen_rtx_PLUS (mode,
5860 gen_rtx_MULT (mode,
5861 XEXP (arg0, 0),
5862 arg1),
5863 gen_rtx_MULT (mode,
5864 XEXP (arg0, 1),
5865 arg1)),
5866 ext_val, benefit);
5868 default:
5869 abort ();
5872 case ASHIFT:
5873 /* Shift by constant is multiply by power of two. */
5874 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5875 return 0;
5877 return
5878 simplify_giv_expr (loop,
5879 gen_rtx_MULT (mode,
5880 XEXP (x, 0),
5881 GEN_INT ((HOST_WIDE_INT) 1
5882 << INTVAL (XEXP (x, 1)))),
5883 ext_val, benefit);
5885 case NEG:
5886 /* "-a" is "a * (-1)" */
5887 return simplify_giv_expr (loop,
5888 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5889 ext_val, benefit);
5891 case NOT:
5892 /* "~a" is "-a - 1". Silly, but easy. */
5893 return simplify_giv_expr (loop,
5894 gen_rtx_MINUS (mode,
5895 gen_rtx_NEG (mode, XEXP (x, 0)),
5896 const1_rtx),
5897 ext_val, benefit);
5899 case USE:
5900 /* Already in proper form for invariant. */
5901 return x;
5903 case SIGN_EXTEND:
5904 case ZERO_EXTEND:
5905 case TRUNCATE:
5906 /* Conditionally recognize extensions of simple IVs. After we've
5907 computed loop traversal counts and verified the range of the
5908 source IV, we'll reevaluate this as a GIV. */
5909 if (*ext_val == NULL_RTX)
5911 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5912 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
5914 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
5915 return arg0;
5918 goto do_default;
5920 case REG:
5921 /* If this is a new register, we can't deal with it. */
5922 if (REGNO (x) >= max_reg_before_loop)
5923 return 0;
5925 /* Check for biv or giv. */
5926 switch (REG_IV_TYPE (ivs, REGNO (x)))
5928 case BASIC_INDUCT:
5929 return x;
5930 case GENERAL_INDUCT:
5932 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
5934 /* Form expression from giv and add benefit. Ensure this giv
5935 can derive another and subtract any needed adjustment if so. */
5937 /* Increasing the benefit here is risky. The only case in which it
5938 is arguably correct is if this is the only use of V. In other
5939 cases, this will artificially inflate the benefit of the current
5940 giv, and lead to suboptimal code. Thus, it is disabled, since
5941 potentially not reducing an only marginally beneficial giv is
5942 less harmful than reducing many givs that are not really
5943 beneficial. */
5945 rtx single_use = regs->array[REGNO (x)].single_usage;
5946 if (single_use && single_use != const0_rtx)
5947 *benefit += v->benefit;
5950 if (v->cant_derive)
5951 return 0;
5953 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
5954 v->src_reg, v->mult_val),
5955 v->add_val);
5957 if (v->derive_adjustment)
5958 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5959 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
5960 if (*ext_val)
5962 if (!v->ext_dependant)
5963 return arg0;
5965 else
5967 *ext_val = v->ext_dependant;
5968 return arg0;
5970 return 0;
5973 default:
5974 do_default:
5975 /* If it isn't an induction variable, and it is invariant, we
5976 may be able to simplify things further by looking through
5977 the bits we just moved outside the loop. */
5978 if (loop_invariant_p (loop, x) == 1)
5980 struct movable *m;
5981 struct loop_movables *movables = LOOP_MOVABLES (loop);
5983 for (m = movables->head; m; m = m->next)
5984 if (rtx_equal_p (x, m->set_dest))
5986 /* Ok, we found a match. Substitute and simplify. */
5988 /* If we match another movable, we must use that, as
5989 this one is going away. */
5990 if (m->match)
5991 return simplify_giv_expr (loop, m->match->set_dest,
5992 ext_val, benefit);
5994 /* If consec is non-zero, this is a member of a group of
5995 instructions that were moved together. We handle this
5996 case only to the point of seeking to the last insn and
5997 looking for a REG_EQUAL. Fail if we don't find one. */
5998 if (m->consec != 0)
6000 int i = m->consec;
6001 tem = m->insn;
6004 tem = NEXT_INSN (tem);
6006 while (--i > 0);
6008 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6009 if (tem)
6010 tem = XEXP (tem, 0);
6012 else
6014 tem = single_set (m->insn);
6015 if (tem)
6016 tem = SET_SRC (tem);
6019 if (tem)
6021 /* What we are most interested in is pointer
6022 arithmetic on invariants -- only take
6023 patterns we may be able to do something with. */
6024 if (GET_CODE (tem) == PLUS
6025 || GET_CODE (tem) == MULT
6026 || GET_CODE (tem) == ASHIFT
6027 || GET_CODE (tem) == CONST_INT
6028 || GET_CODE (tem) == SYMBOL_REF)
6030 tem = simplify_giv_expr (loop, tem, ext_val,
6031 benefit);
6032 if (tem)
6033 return tem;
6035 else if (GET_CODE (tem) == CONST
6036 && GET_CODE (XEXP (tem, 0)) == PLUS
6037 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6038 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6040 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6041 ext_val, benefit);
6042 if (tem)
6043 return tem;
6046 break;
6049 break;
6052 /* Fall through to general case. */
6053 default:
6054 /* If invariant, return as USE (unless CONST_INT).
6055 Otherwise, not giv. */
6056 if (GET_CODE (x) == USE)
6057 x = XEXP (x, 0);
6059 if (loop_invariant_p (loop, x) == 1)
6061 if (GET_CODE (x) == CONST_INT)
6062 return x;
6063 if (GET_CODE (x) == CONST
6064 && GET_CODE (XEXP (x, 0)) == PLUS
6065 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6066 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6067 x = XEXP (x, 0);
6068 return gen_rtx_USE (mode, x);
6070 else
6071 return 0;
6075 /* This routine folds invariants such that there is only ever one
6076 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6078 static rtx
6079 sge_plus_constant (x, c)
6080 rtx x, c;
6082 if (GET_CODE (x) == CONST_INT)
6083 return GEN_INT (INTVAL (x) + INTVAL (c));
6084 else if (GET_CODE (x) != PLUS)
6085 return gen_rtx_PLUS (GET_MODE (x), x, c);
6086 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6088 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6089 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6091 else if (GET_CODE (XEXP (x, 0)) == PLUS
6092 || GET_CODE (XEXP (x, 1)) != PLUS)
6094 return gen_rtx_PLUS (GET_MODE (x),
6095 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6097 else
6099 return gen_rtx_PLUS (GET_MODE (x),
6100 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6104 static rtx
6105 sge_plus (mode, x, y)
6106 enum machine_mode mode;
6107 rtx x, y;
6109 while (GET_CODE (y) == PLUS)
6111 rtx a = XEXP (y, 0);
6112 if (GET_CODE (a) == CONST_INT)
6113 x = sge_plus_constant (x, a);
6114 else
6115 x = gen_rtx_PLUS (mode, x, a);
6116 y = XEXP (y, 1);
6118 if (GET_CODE (y) == CONST_INT)
6119 x = sge_plus_constant (x, y);
6120 else
6121 x = gen_rtx_PLUS (mode, x, y);
6122 return x;
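/* For example, sge_plus_constant folds ((sym + 4) + 3) into (sym + 7),
   and sge_plus walks a nested chain of PLUSes so that at most one
   CONST_INT survives in the sum:

	sge_plus (mode, sym, (plus (const_int 4) (const_int 3)))
	  == (plus sym (const_int 7))  */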
6125 /* Help detect a giv that is calculated by several consecutive insns;
6126 for example,
6127 giv = biv * M
6128 giv = giv + A
6129 The caller has already identified the first insn P as having a giv as dest;
6130 we check that all other insns that set the same register follow
6131 immediately after P, that they alter nothing else,
6132 and that the result of the last is still a giv.
6134 The value is 0 if the reg set in P is not really a giv.
6135 Otherwise, the value is the amount gained by eliminating
6136 all the consecutive insns that compute the value.
6138 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6139 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6141 The coefficients of the ultimate giv value are stored in
6142 *MULT_VAL and *ADD_VAL. */
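/* Concretely, with M == 4 and A == (reg a) the pair

	g = i * 4
	g = g + a

   is recognized as the single giv g == i*4 + a, and the value returned
   is FIRST_BENEFIT plus the benefit of the second insn.  */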
6144 static int
6145 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6146 add_val, mult_val, ext_val, last_consec_insn)
6147 const struct loop *loop;
6148 int first_benefit;
6149 rtx p;
6150 rtx src_reg;
6151 rtx dest_reg;
6152 rtx *add_val;
6153 rtx *mult_val;
6154 rtx *ext_val;
6155 rtx *last_consec_insn;
6157 struct loop_ivs *ivs = LOOP_IVS (loop);
6158 struct loop_regs *regs = LOOP_REGS (loop);
6159 int count;
6160 enum rtx_code code;
6161 int benefit;
6162 rtx temp;
6163 rtx set;
6165 /* Indicate that this is a giv so that we can update the value produced in
6166 each insn of the multi-insn sequence.
6168 This induction structure will be used only by the call to
6169 general_induction_var below, so we can allocate it on our stack.
6170 If this is a giv, our caller will replace the induct var entry with
6171 a new induction structure. */
6172 struct induction *v;
6174 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6175 return 0;
6177 v = (struct induction *) alloca (sizeof (struct induction));
6178 v->src_reg = src_reg;
6179 v->mult_val = *mult_val;
6180 v->add_val = *add_val;
6181 v->benefit = first_benefit;
6182 v->cant_derive = 0;
6183 v->derive_adjustment = 0;
6184 v->ext_dependant = NULL_RTX;
6186 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6187 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6189 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6191 while (count > 0)
6193 p = NEXT_INSN (p);
6194 code = GET_CODE (p);
6196 /* If libcall, skip to end of call sequence. */
6197 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6198 p = XEXP (temp, 0);
6200 if (code == INSN
6201 && (set = single_set (p))
6202 && GET_CODE (SET_DEST (set)) == REG
6203 && SET_DEST (set) == dest_reg
6204 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6205 add_val, mult_val, ext_val, 0,
6206 &benefit, VOIDmode)
6207 /* Giv created by equivalent expression. */
6208 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6209 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6210 add_val, mult_val, ext_val, 0,
6211 &benefit, VOIDmode)))
6212 && src_reg == v->src_reg)
6214 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6215 benefit += libcall_benefit (p);
6217 count--;
6218 v->mult_val = *mult_val;
6219 v->add_val = *add_val;
6220 v->benefit += benefit;
6222 else if (code != NOTE)
6224 /* Allow insns that set something other than this giv to a
6225 constant. Such insns are needed on machines which cannot
6226 include long constants and should not disqualify a giv. */
6227 if (code == INSN
6228 && (set = single_set (p))
6229 && SET_DEST (set) != dest_reg
6230 && CONSTANT_P (SET_SRC (set)))
6231 continue;
6233 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6234 return 0;
6238 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6239 *last_consec_insn = p;
6240 return v->benefit;
6243 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6244 represented by G1. If no such expression can be found, or it is clear that
6245 it cannot possibly be a valid address, 0 is returned.
6247 To perform the computation, we note that
6248 G1 = x * v + a and
6249 G2 = y * v + b
6250 where `v' is the biv.
6252 So G2 = (y/x) * G1 + (b - a*y/x).
6254 Note that MULT = y/x.
6256 Update: A and B are now allowed to be additive expressions such that
6257 B contains all variables in A. That is, computing B-A will not require
6258 subtracting variables. */
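/* Worked example: for G1 = 2*v + 1 and G2 = 6*v + 5 we get MULT ==
   y/x == 3 and an additive term of b - a*y/x == 5 - 3 == 2, so
   G2 == 3*G1 + 2; indeed 3 * (2*v + 1) + 2 == 6*v + 5.  */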
6260 static rtx
6261 express_from_1 (a, b, mult)
6262 rtx a, b, mult;
6264 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6266 if (mult == const0_rtx)
6267 return b;
6269 /* If MULT is not 1, we cannot handle A with non-constants, since we
6270 would then be required to subtract multiples of the registers in A.
6271 This is theoretically possible, and may even apply to some Fortran
6272 constructs, but it is a lot of work and we do not attempt it here. */
6274 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6275 return NULL_RTX;
6277 /* In general these structures are sorted top to bottom (down the PLUS
6278 chain), but not left to right across the PLUS. If B is a higher
6279 order giv than A, we can strip one level and recurse. If A is higher
6280 order, we'll eventually bail out, but won't know that until the end.
6281 If they are the same, we'll strip one level around this loop. */
6283 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6285 rtx ra, rb, oa, ob, tmp;
6287 ra = XEXP (a, 0), oa = XEXP (a, 1);
6288 if (GET_CODE (ra) == PLUS)
6289 tmp = ra, ra = oa, oa = tmp;
6291 rb = XEXP (b, 0), ob = XEXP (b, 1);
6292 if (GET_CODE (rb) == PLUS)
6293 tmp = rb, rb = ob, ob = tmp;
6295 if (rtx_equal_p (ra, rb))
6296 /* We matched: remove one reg completely. */
6297 a = oa, b = ob;
6298 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6299 /* An alternate match. */
6300 a = oa, b = rb;
6301 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6302 /* An alternate match. */
6303 a = ra, b = ob;
6304 else
6306 /* Indicates an extra register in B. Strip one level from B and
6307 recurse, hoping B was the higher order expression. */
6308 ob = express_from_1 (a, ob, mult);
6309 if (ob == NULL_RTX)
6310 return NULL_RTX;
6311 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6315 /* Here we are at the last level of A; go through the cases hoping to
6316 get rid of everything but a constant. */
6318 if (GET_CODE (a) == PLUS)
6320 rtx ra, oa;
6322 ra = XEXP (a, 0), oa = XEXP (a, 1);
6323 if (rtx_equal_p (oa, b))
6324 oa = ra;
6325 else if (!rtx_equal_p (ra, b))
6326 return NULL_RTX;
6328 if (GET_CODE (oa) != CONST_INT)
6329 return NULL_RTX;
6331 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6333 else if (GET_CODE (a) == CONST_INT)
6335 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6337 else if (CONSTANT_P (a))
6339 return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), const0_rtx, a);
6341 else if (GET_CODE (b) == PLUS)
6343 if (rtx_equal_p (a, XEXP (b, 0)))
6344 return XEXP (b, 1);
6345 else if (rtx_equal_p (a, XEXP (b, 1)))
6346 return XEXP (b, 0);
6347 else
6348 return NULL_RTX;
6350 else if (rtx_equal_p (a, b))
6351 return const0_rtx;
6353 return NULL_RTX;
6356 rtx
6357 express_from (g1, g2)
6358 struct induction *g1, *g2;
6360 rtx mult, add;
6362 /* The value that G1 will be multiplied by must be a constant integer. Also,
6363 the only chance we have of getting a valid address is if y/x (see above
6364 for notation) is also an integer. */
6365 if (GET_CODE (g1->mult_val) == CONST_INT
6366 && GET_CODE (g2->mult_val) == CONST_INT)
6368 if (g1->mult_val == const0_rtx
6369 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6370 return NULL_RTX;
6371 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6373 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6374 mult = const1_rtx;
6375 else
6377 /* ??? Find out if the one is a multiple of the other? */
6378 return NULL_RTX;
6381 add = express_from_1 (g1->add_val, g2->add_val, mult);
6382 if (add == NULL_RTX)
6384 /* Failed. If we've got a multiplication factor between G1 and G2,
6385 scale G1's addend and try again. */
6386 if (INTVAL (mult) > 1)
6388 rtx g1_add_val = g1->add_val;
6389 if (GET_CODE (g1_add_val) == MULT
6390 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6392 HOST_WIDE_INT m;
6393 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6394 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6395 XEXP (g1_add_val, 0), GEN_INT (m));
6397 else
6399 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6400 mult);
6403 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6406 if (add == NULL_RTX)
6407 return NULL_RTX;
6409 /* Form simplified final result. */
6410 if (mult == const0_rtx)
6411 return add;
6412 else if (mult == const1_rtx)
6413 mult = g1->dest_reg;
6414 else
6415 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6417 if (add == const0_rtx)
6418 return mult;
6419 else
6421 if (GET_CODE (add) == PLUS
6422 && CONSTANT_P (XEXP (add, 1)))
6424 rtx tem = XEXP (add, 1);
6425 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6426 add = tem;
6429 return gen_rtx_PLUS (g2->mode, mult, add);
6433 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6434 represented by G1. This indicates that G2 should be combined with G1 and
6435 that G2 can use (either directly or via an address expression) a register
6436 used to represent G1. */
6438 static rtx
6439 combine_givs_p (g1, g2)
6440 struct induction *g1, *g2;
6442 rtx comb, ret;
6444 /* With the introduction of ext dependent givs, we must take care with modes.
6445 G2 must not use a wider mode than G1. */
6446 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
6447 return NULL_RTX;
6449 ret = comb = express_from (g1, g2);
6450 if (comb == NULL_RTX)
6451 return NULL_RTX;
6452 if (g1->mode != g2->mode)
6453 ret = gen_lowpart (g2->mode, comb);
6455 /* If these givs are identical, they can be combined. We use the results
6456 of express_from because the addends are not in a canonical form, so
6457 rtx_equal_p is a weaker test. */
6458 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6459 combination to be the other way round. */
6460 if (comb == g1->dest_reg
6461 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6463 return ret;
6466 /* If G2 can be expressed as a function of G1 and that function is valid
6467 as an address and no more expensive than using a register for G2,
6468 the expression of G2 in terms of G1 can be used. */
6469 if (ret != NULL_RTX
6470 && g2->giv_type == DEST_ADDR
6471 && memory_address_p (GET_MODE (g2->mem), ret)
6472 /* ??? Loses, especially with -fforce-addr, where *g2->location
6473 will always be a register, and so anything more complicated
6474 gets discarded. */
6475 #if 0
6476 #ifdef ADDRESS_COST
6477 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6478 #else
6479 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6480 #endif
6481 #endif
6484 return ret;
6487 return NULL_RTX;
6490 /* Check each extension dependent giv in this class to see if its
6491 root biv is safe from wrapping in the interior mode, which would
6492 make the giv illegal. */
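/* For example, a QImode biv that starts at 0 and is incremented by 1
   for 200 iterations ends at 200: that is within the unsigned range
   (200 <= 255), so zero extension is safe, but outside the signed
   range (200 > 127), so sign extension is not.  */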
6494 static void
6495 check_ext_dependant_givs (bl, loop_info)
6496 struct iv_class *bl;
6497 struct loop_info *loop_info;
6499 int ze_ok = 0, se_ok = 0, info_ok = 0;
6500 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
6501 HOST_WIDE_INT start_val;
6502 unsigned HOST_WIDE_INT u_end_val = 0;
6503 unsigned HOST_WIDE_INT u_start_val = 0;
6504 rtx incr = pc_rtx;
6505 struct induction *v;
6507 /* Make sure the iteration data is available. We must have
6508 constants in order to be certain of no overflow. */
6509 /* ??? An unknown iteration count with an increment of +-1
6510 combined with friendly exit tests against an invariant
6511 value is also amenable to optimization. Not implemented. */
6512 if (loop_info->n_iterations > 0
6513 && bl->initial_value
6514 && GET_CODE (bl->initial_value) == CONST_INT
6515 && (incr = biv_total_increment (bl))
6516 && GET_CODE (incr) == CONST_INT
6517 /* Make sure the host can represent the arithmetic. */
6518 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
6520 unsigned HOST_WIDE_INT abs_incr, total_incr;
6521 HOST_WIDE_INT s_end_val;
6522 int neg_incr;
6524 info_ok = 1;
6525 start_val = INTVAL (bl->initial_value);
6526 u_start_val = start_val;
6528 neg_incr = 0, abs_incr = INTVAL (incr);
6529 if (INTVAL (incr) < 0)
6530 neg_incr = 1, abs_incr = -abs_incr;
6531 total_incr = abs_incr * loop_info->n_iterations;
6533 /* Check for host arithmetic overflow. */
6534 if (total_incr / loop_info->n_iterations == abs_incr)
6536 unsigned HOST_WIDE_INT u_max;
6537 HOST_WIDE_INT s_max;
6539 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
6540 s_end_val = u_end_val;
6541 u_max = GET_MODE_MASK (biv_mode);
6542 s_max = u_max >> 1;
6544 /* Check zero extension of biv ok. */
6545 if (start_val >= 0
6546 /* Check for host arithmetic overflow. */
6547 && (neg_incr
6548 ? u_end_val < u_start_val
6549 : u_end_val > u_start_val)
6550 /* Check for target arithmetic overflow. */
6551 && (neg_incr
6552 ? 1 /* taken care of with host overflow */
6553 : u_end_val <= u_max))
6555 ze_ok = 1;
6558 /* Check sign extension of biv ok. */
6559 /* ??? While it is true that overflow with signed and pointer
6560 arithmetic is undefined, I fear too many programmers don't
6561 keep this fact in mind -- myself included on occasion.
6562 So we are conservative with the signed overflow optimizations. */
6563 if (start_val >= -s_max - 1
6564 /* Check for host arithmetic overflow. */
6565 && (neg_incr
6566 ? s_end_val < start_val
6567 : s_end_val > start_val)
6568 /* Check for target arithmetic overflow. */
6569 && (neg_incr
6570 ? s_end_val >= -s_max - 1
6571 : s_end_val <= s_max))
6573 se_ok = 1;
6578 /* Invalidate givs that fail the tests. */
6579 for (v = bl->giv; v; v = v->next_iv)
6580 if (v->ext_dependant)
6582 enum rtx_code code = GET_CODE (v->ext_dependant);
6583 int ok = 0;
6585 switch (code)
6587 case SIGN_EXTEND:
6588 ok = se_ok;
6589 break;
6590 case ZERO_EXTEND:
6591 ok = ze_ok;
6592 break;
6594 case TRUNCATE:
6595 /* We don't know whether this value is being used as either
6596 signed or unsigned, so to safely truncate we must satisfy
6597 both. The initial check here verifies the BIV itself;
6598 once that is successful we may check its range wrt the
6599 derived GIV. */
6600 if (se_ok && ze_ok)
6602 enum machine_mode outer_mode = GET_MODE (v->ext_dependant);
6603 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
6605 /* We know from the above that both endpoints are nonnegative,
6606 and that there is no wrapping. Verify that both endpoints
6607 are within the (signed) range of the outer mode. */
6608 if (u_start_val <= max && u_end_val <= max)
6609 ok = 1;
6611 break;
6613 default:
6614 abort ();
6617 if (ok)
6619 if (loop_dump_stream)
6621 fprintf (loop_dump_stream,
6622 "Verified ext dependant giv at %d of reg %d\n",
6623 INSN_UID (v->insn), bl->regno);
6626 else
6628 if (loop_dump_stream)
6630 const char *why;
6632 if (info_ok)
6633 why = "biv iteration values overflowed";
6634 else
6636 if (incr == pc_rtx)
6637 incr = biv_total_increment (bl);
6638 if (incr == const1_rtx)
6639 why = "biv iteration info incomplete; incr by 1";
6640 else
6641 why = "biv iteration info incomplete";
6644 fprintf (loop_dump_stream,
6645 "Failed ext dependant giv at %d, %s\n",
6646 INSN_UID (v->insn), why);
6648 v->ignore = 1;
6653 /* Generate a version of VALUE in a mode appropriate for initializing V. */
6655 rtx
6656 extend_value_for_giv (v, value)
6657 struct induction *v;
6658 rtx value;
6660 rtx ext_dep = v->ext_dependant;
6662 if (! ext_dep)
6663 return value;
6665 /* Recall that check_ext_dependant_givs verified that the known bounds
6666 of a biv did not overflow or wrap with respect to the extension for
6667 the giv. Therefore, constants need no additional adjustment. */
6668 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
6669 return value;
6671 /* Otherwise, we must adjust the value to compensate for the
6672 differing modes of the biv and the giv. */
6673 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
6676 struct combine_givs_stats
6678 int giv_number;
6679 int total_benefit;
6682 static int
6683 cmp_combine_givs_stats (xp, yp)
6684 const PTR xp;
6685 const PTR yp;
6687 const struct combine_givs_stats * const x =
6688 (const struct combine_givs_stats *) xp;
6689 const struct combine_givs_stats * const y =
6690 (const struct combine_givs_stats *) yp;
6691 int d;
6692 d = y->total_benefit - x->total_benefit;
6693 /* Stabilize the sort. */
6694 if (!d)
6695 d = x->giv_number - y->giv_number;
6696 return d;
6699 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6700 any other. If so, point SAME to the giv combined with and set NEW_REG to
6701 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6702 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
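/* For example, if g1 computes 4*i and g2 computes 4*i + 4, express_from
   can write g2 as (plus g1->dest_reg (const_int 4)); g2's SAME field is
   then pointed at g1, NEW_REG holds that expression, and only g1 needs
   to be reduced.  */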
6704 static void
6705 combine_givs (regs, bl)
6706 struct loop_regs *regs;
6707 struct iv_class *bl;
6709 /* Additional benefit to add for being combined multiple times. */
6710 const int extra_benefit = 3;
6712 struct induction *g1, *g2, **giv_array;
6713 int i, j, k, giv_count;
6714 struct combine_givs_stats *stats;
6715 rtx *can_combine;
6717 /* Count givs, because bl->giv_count is incorrect here. */
6718 giv_count = 0;
6719 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6720 if (!g1->ignore)
6721 giv_count++;
6723 giv_array
6724 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6725 i = 0;
6726 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6727 if (!g1->ignore)
6728 giv_array[i++] = g1;
6730 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6731 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
6733 for (i = 0; i < giv_count; i++)
6735 int this_benefit;
6736 rtx single_use;
6738 g1 = giv_array[i];
6739 stats[i].giv_number = i;
6741 /* If a DEST_REG GIV is used only once, do not allow it to combine
6742 with anything, for in doing so we will gain nothing that cannot
6743 be had by simply letting the GIV with which we would have combined
6744 to be reduced on its own. The lossage shows up in particular with
6745 DEST_ADDR targets on hosts with reg+reg addressing, though it can
6746 be seen elsewhere as well. */
6747 if (g1->giv_type == DEST_REG
6748 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
6749 && single_use != const0_rtx)
6750 continue;
6752 this_benefit = g1->benefit;
6753 /* Add an additional weight for zero addends. */
6754 if (g1->no_const_addval)
6755 this_benefit += 1;
6757 for (j = 0; j < giv_count; j++)
6759 rtx this_combine;
6761 g2 = giv_array[j];
6762 if (g1 != g2
6763 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6765 can_combine[i * giv_count + j] = this_combine;
6766 this_benefit += g2->benefit + extra_benefit;
6769 stats[i].total_benefit = this_benefit;
6772 /* Iterate, combining until we can't. */
6773 restart:
6774 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6776 if (loop_dump_stream)
6778 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6779 for (k = 0; k < giv_count; k++)
6781 g1 = giv_array[stats[k].giv_number];
6782 if (!g1->combined_with && !g1->same)
6783 fprintf (loop_dump_stream, " {%d, %d}",
6784 INSN_UID (giv_array[stats[k].giv_number]->insn),
6785 stats[k].total_benefit);
6787 putc ('\n', loop_dump_stream);
6790 for (k = 0; k < giv_count; k++)
6792 int g1_add_benefit = 0;
6794 i = stats[k].giv_number;
6795 g1 = giv_array[i];
6797 /* If it has already been combined, skip. */
6798 if (g1->combined_with || g1->same)
6799 continue;
6801 for (j = 0; j < giv_count; j++)
6803 g2 = giv_array[j];
6804 if (g1 != g2 && can_combine[i * giv_count + j]
6805 /* If it has already been combined, skip. */
6806 && ! g2->same && ! g2->combined_with)
6808 int l;
6810 g2->new_reg = can_combine[i * giv_count + j];
6811 g2->same = g1;
6812 g1->combined_with++;
6813 g1->lifetime += g2->lifetime;
6815 g1_add_benefit += g2->benefit;
6817 /* ??? The new final_[bg]iv_value code does a much better job
6818 of finding replaceable giv's, and hence this code may no
6819 longer be necessary. */
6820 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6821 g1_add_benefit -= copy_cost;
6823 /* To help optimize the next set of combinations, remove
6824 this giv from the benefits of other potential mates. */
6825 for (l = 0; l < giv_count; ++l)
6827 int m = stats[l].giv_number;
6828 if (can_combine[m * giv_count + j])
6829 stats[l].total_benefit -= g2->benefit + extra_benefit;
6832 if (loop_dump_stream)
6833 fprintf (loop_dump_stream,
6834 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
6835 INSN_UID (g2->insn), INSN_UID (g1->insn),
6836 g1->benefit, g1_add_benefit, g1->lifetime);
6840 /* To help optimize the next set of combinations, remove
6841 this giv from the benefits of other potential mates. */
6842 if (g1->combined_with)
6844 for (j = 0; j < giv_count; ++j)
6846 int m = stats[j].giv_number;
6847 if (can_combine[m * giv_count + i])
6848 stats[j].total_benefit -= g1->benefit + extra_benefit;
6851 g1->benefit += g1_add_benefit;
6853 /* We've finished with this giv, and everything it touched.
6854 Restart the combination so that proper weights for the
6855 rest of the givs are properly taken into account. */
6856 /* ??? Ideally we would compact the arrays at this point, so
6857 as to not cover old ground. But sanely compacting
6858 can_combine is tricky. */
6859 goto restart;
6863 /* Clean up. */
6864 free (stats);
6865 free (can_combine);
6868 /* Generate sequence for REG = B * M + A. */
6870 static rtx
6871 gen_add_mult (b, m, a, reg)
6872 rtx b; /* initial value of basic induction variable */
6873 rtx m; /* multiplicative constant */
6874 rtx a; /* additive constant */
6875 rtx reg; /* destination register */
6877 rtx seq;
6878 rtx result;
6880 start_sequence ();
6881 /* Use unsigned arithmetic. */
6882 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
6883 if (reg != result)
6884 emit_move_insn (reg, result);
6885 seq = gen_sequence ();
6886 end_sequence ();
6888 return seq;
6892 /* Update registers created in insn sequence SEQ. */
6894 static void
6895 loop_regs_update (loop, seq)
6896 const struct loop *loop ATTRIBUTE_UNUSED;
6897 rtx seq;
6899 /* Update register info for alias analysis. */
6901 if (GET_CODE (seq) == SEQUENCE)
6903 int i;
6904 for (i = 0; i < XVECLEN (seq, 0); ++i)
6906 rtx set = single_set (XVECEXP (seq, 0, i));
6907 if (set && GET_CODE (SET_DEST (set)) == REG)
6908 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6911 else
6913 rtx set = single_set (seq);
6914 if (set && GET_CODE (SET_DEST (set)) == REG)
6915 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6920 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
6922 void
6923 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
6924 const struct loop *loop;
6925 rtx b; /* initial value of basic induction variable */
6926 rtx m; /* multiplicative constant */
6927 rtx a; /* additive constant */
6928 rtx reg; /* destination register */
6929 basic_block before_bb;
6930 rtx before_insn;
6932 rtx seq;
6934 if (! before_insn)
6936 loop_iv_add_mult_hoist (loop, b, m, a, reg);
6937 return;
6940 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6941 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6943 /* Increase the lifetime of any invariants moved further in code. */
6944 update_reg_last_use (a, before_insn);
6945 update_reg_last_use (b, before_insn);
6946 update_reg_last_use (m, before_insn);
6948 loop_insn_emit_before (loop, before_bb, before_insn, seq);
6950 /* It is possible that the expansion created lots of new registers.
6951 Iterate over the sequence we just created and record them all. */
6952 loop_regs_update (loop, seq);
6956 /* Emit insns in loop pre-header to set REG = B * M + A. */
6958 void
6959 loop_iv_add_mult_sink (loop, b, m, a, reg)
6960 const struct loop *loop;
6961 rtx b; /* initial value of basic induction variable */
6962 rtx m; /* multiplicative constant */
6963 rtx a; /* additive constant */
6964 rtx reg; /* destination register */
6966 rtx seq;
6968 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6969 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6971 /* Increase the lifetime of any invariants moved further in code.
6972 ??? Is this really necessary? */
6973 update_reg_last_use (a, loop->sink);
6974 update_reg_last_use (b, loop->sink);
6975 update_reg_last_use (m, loop->sink);
6977 loop_insn_sink (loop, seq);
6979 /* It is possible that the expansion created lots of new registers.
6980 Iterate over the sequence we just created and record them all. */
6981 loop_regs_update (loop, seq);
6985 /* Emit insns after loop to set REG = B * M + A. */
6987 void
6988 loop_iv_add_mult_hoist (loop, b, m, a, reg)
6989 const struct loop *loop;
6990 rtx b; /* initial value of basic induction variable */
6991 rtx m; /* multiplicative constant */
6992 rtx a; /* additive constant */
6993 rtx reg; /* destination register */
6995 rtx seq;
6997 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6998 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7000 loop_insn_hoist (loop, seq);
7002 /* It is possible that the expansion created lots of new registers.
7003 Iterate over the sequence we just created and record them all. */
7004 loop_regs_update (loop, seq);
7009 /* Similar to gen_add_mult, but compute cost rather than generating
7010 sequence. */
7012 static int
7013 iv_add_mult_cost (b, m, a, reg)
7014 rtx b; /* initial value of basic induction variable */
7015 rtx m; /* multiplicative constant */
7016 rtx a; /* additive constant */
7017 rtx reg; /* destination register */
7019 int cost = 0;
7020 rtx last, result;
7022 start_sequence ();
7023 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7024 if (reg != result)
7025 emit_move_insn (reg, result);
7026 last = get_last_insn ();
7027 while (last)
7029 rtx t = single_set (last);
7030 if (t)
7031 cost += rtx_cost (SET_SRC (t), SET);
7032 last = PREV_INSN (last);
7034 end_sequence ();
7035 return cost;
7038 /* Test whether A * B can be computed without
7039 an actual multiply insn. Value is 1 if so. */
7041 static int
7042 product_cheap_p (a, b)
7043 rtx a;
7044 rtx b;
7046 int i;
7047 rtx tmp;
7048 int win = 1;
7050 /* If only one is constant, make it B. */
7051 if (GET_CODE (a) == CONST_INT)
7052 tmp = a, a = b, b = tmp;
7054 /* If first constant, both constant, so don't need multiply. */
7055 if (GET_CODE (a) == CONST_INT)
7056 return 1;
7058 /* If second not constant, neither is constant, so would need multiply. */
7059 if (GET_CODE (b) != CONST_INT)
7060 return 0;
7062 /* One operand is constant, so might not need multiply insn. Generate the
7063 code for the multiply and see if a call or multiply, or long sequence
7064 of insns is generated. */
7066 start_sequence ();
7067 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7068 tmp = gen_sequence ();
7069 end_sequence ();
7071 if (GET_CODE (tmp) == SEQUENCE)
7073 if (XVEC (tmp, 0) == 0)
7074 win = 1;
7075 else if (XVECLEN (tmp, 0) > 3)
7076 win = 0;
7077 else
7078 for (i = 0; i < XVECLEN (tmp, 0); i++)
7080 rtx insn = XVECEXP (tmp, 0, i);
7082 if (GET_CODE (insn) != INSN
7083 || (GET_CODE (PATTERN (insn)) == SET
7084 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7085 || (GET_CODE (PATTERN (insn)) == PARALLEL
7086 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7087 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7089 win = 0;
7090 break;
7094 else if (GET_CODE (tmp) == SET
7095 && GET_CODE (SET_SRC (tmp)) == MULT)
7096 win = 0;
7097 else if (GET_CODE (tmp) == PARALLEL
7098 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7099 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7100 win = 0;
7102 return win;
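/* For example, a multiply by (const_int 4) is normally expanded as a
   single left shift, so the sequence tested above is short and free of
   MULT and product_cheap_p returns 1; a multiply by a large constant
   with many set bits may instead expand to a real multiply insn or a
   long shift/add sequence, and 0 is returned.  The exact expansion is
   target dependent.  */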
7105 /* Check to see if loop can be terminated by a "decrement and branch until
7106 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7107 Also try reversing an increment loop to a decrement loop
7108 to see if the optimization can be performed.
7109 Value is nonzero if optimization was performed. */
7111 /* This is useful even if the architecture doesn't have such an insn,
7112 because it might change a loop which increments from 0 to n to a loop
7113 which decrements from n to 0. A loop that decrements to zero is usually
7114 faster than one that increments from zero. */
7116 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7117 such as approx_final_value, biv_total_increment, loop_iterations, and
7118 final_[bg]iv_value. */
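/* For example, a loop that runs a counter from 0 up to 10, where the
   counter is used only for counting, can be rewritten to run it from
   10 down to 0 and exit when it reaches zero; the REG_NONNEG note added
   below then lets a decrement-and-branch-until-zero insn (such as the
   m68k dbra) perform the decrement, test and branch in a single
   instruction.  */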
7120 static int
7121 check_dbra_loop (loop, insn_count)
7122 struct loop *loop;
7123 int insn_count;
7125 struct loop_info *loop_info = LOOP_INFO (loop);
7126 struct loop_regs *regs = LOOP_REGS (loop);
7127 struct loop_ivs *ivs = LOOP_IVS (loop);
7128 struct iv_class *bl;
7129 rtx reg;
7130 rtx jump_label;
7131 rtx final_value;
7132 rtx start_value;
7133 rtx new_add_val;
7134 rtx comparison;
7135 rtx before_comparison;
7136 rtx p;
7137 rtx jump;
7138 rtx first_compare;
7139 int compare_and_branch;
7140 rtx loop_start = loop->start;
7141 rtx loop_end = loop->end;
7143 /* If last insn is a conditional branch, and the insn before tests a
7144 register value, try to optimize it. Otherwise, we can't do anything. */
7146 jump = PREV_INSN (loop_end);
7147 comparison = get_condition_for_loop (loop, jump);
7148 if (comparison == 0)
7149 return 0;
7150 if (!onlyjump_p (jump))
7151 return 0;
7153 /* Try to compute whether the compare/branch at the loop end is one or
7154 two instructions. */
7155 get_condition (jump, &first_compare);
7156 if (first_compare == jump)
7157 compare_and_branch = 1;
7158 else if (first_compare == prev_nonnote_insn (jump))
7159 compare_and_branch = 2;
7160 else
7161 return 0;
7164 /* If more than one condition is present to control the loop, then
7165 do not proceed, as this function does not know how to rewrite
7166 loop tests with more than one condition.
7168 Look backwards from the first insn in the last comparison
7169 sequence and see if we've got another comparison sequence. */
7171 rtx jump1;
7172 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7173 if (GET_CODE (jump1) == JUMP_INSN)
7174 return 0;
7177 /* Check all of the bivs to see if the compare uses one of them.
7178 Skip biv's set more than once because we can't guarantee that
7179 it will be zero on the last iteration. Also skip if the biv is
7180 used between its update and the test insn. */
7182 for (bl = ivs->list; bl; bl = bl->next)
7184 if (bl->biv_count == 1
7185 && ! bl->biv->maybe_multiple
7186 && bl->biv->dest_reg == XEXP (comparison, 0)
7187 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7188 first_compare))
7189 break;
7192 if (! bl)
7193 return 0;
7195 /* Look for the case where the basic induction variable is always
7196 nonnegative, and equals zero on the last iteration.
7197 In this case, add a reg_note REG_NONNEG, which allows the
7198 m68k DBRA instruction to be used. */
7200 if (((GET_CODE (comparison) == GT
7201 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7202 && INTVAL (XEXP (comparison, 1)) == -1)
7203 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7204 && GET_CODE (bl->biv->add_val) == CONST_INT
7205 && INTVAL (bl->biv->add_val) < 0)
7207 /* The initial value must be greater than 0, and
7208 init_val % -dec_value == 0 must hold to ensure that the biv equals
7209 zero on the last iteration. */
7211 if (GET_CODE (bl->initial_value) == CONST_INT
7212 && INTVAL (bl->initial_value) > 0
7213 && (INTVAL (bl->initial_value)
7214 % (-INTVAL (bl->biv->add_val))) == 0)
7216 /* register always nonnegative, add REG_NOTE to branch */
7217 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7218 REG_NOTES (jump)
7219 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7220 REG_NOTES (jump));
7221 bl->nonneg = 1;
7223 return 1;
7226 /* If the decrement is 1 and the value was tested as >= 0 before
7227 the loop, then we can safely optimize. */
7228 for (p = loop_start; p; p = PREV_INSN (p))
7230 if (GET_CODE (p) == CODE_LABEL)
7231 break;
7232 if (GET_CODE (p) != JUMP_INSN)
7233 continue;
7235 before_comparison = get_condition_for_loop (loop, p);
7236 if (before_comparison
7237 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7238 && GET_CODE (before_comparison) == LT
7239 && XEXP (before_comparison, 1) == const0_rtx
7240 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7241 && INTVAL (bl->biv->add_val) == -1)
7243 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7244 REG_NOTES (jump)
7245 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7246 REG_NOTES (jump));
7247 bl->nonneg = 1;
7249 return 1;
7253 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7254 && INTVAL (bl->biv->add_val) > 0)
7256 /* Try to change inc to dec, so can apply above optimization. */
7257 /* Can do this if:
7258 all registers modified are induction variables or invariant,
7259 all memory references have non-overlapping addresses
7260 (obviously true if only one write)
7261 allow 2 insns for the compare/jump at the end of the loop. */
7262 /* Also, we must avoid any instructions which use both the reversed
7263 biv and another biv. Such instructions will fail if the loop is
7264 reversed. We meet this condition by requiring that either
7265 no_use_except_counting is true, or else that there is only
7266 one biv. */
7267 int num_nonfixed_reads = 0;
7268 /* 1 if the iteration var is used only to count iterations. */
7269 int no_use_except_counting = 0;
7270 /* 1 if the loop has no memory store, or it has a single memory store
7271 which is reversible. */
7272 int reversible_mem_store = 1;
7274 if (bl->giv_count == 0 && ! loop->exit_count)
7276 rtx bivreg = regno_reg_rtx[bl->regno];
7277 struct iv_class *blt;
7279 /* If there are no givs for this biv, and the only exit is the
7280 fall through at the end of the loop, then
7281 see if perhaps there are no uses except to count. */
7282 no_use_except_counting = 1;
7283 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7284 if (INSN_P (p))
7286 rtx set = single_set (p);
7288 if (set && GET_CODE (SET_DEST (set)) == REG
7289 && REGNO (SET_DEST (set)) == bl->regno)
7290 /* An insn that sets the biv is okay. */
7292 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7293 || p == prev_nonnote_insn (loop_end))
7294 && reg_mentioned_p (bivreg, PATTERN (p)))
7296 /* If either of these insns uses the biv and sets a pseudo
7297 that has more than one usage, then the biv has uses
7298 other than counting since it's used to derive a value
7299 that is used more than one time. */
7300 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7301 regs);
7302 if (regs->multiple_uses)
7304 no_use_except_counting = 0;
7305 break;
7308 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7310 no_use_except_counting = 0;
7311 break;
7315 /* A biv has uses besides counting if it is used to set another biv. */
7316 for (blt = ivs->list; blt; blt = blt->next)
7317 if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7319 no_use_except_counting = 0;
7320 break;
7324 if (no_use_except_counting)
7325 /* No need to worry about MEMs. */
7327 else if (loop_info->num_mem_sets <= 1)
7329 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7330 if (INSN_P (p))
7331 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7333 /* If the loop has a single store, and the destination address is
7334 invariant, then we can't reverse the loop, because this address
7335 might then have the wrong value at loop exit.
7336 This would work if the source was invariant also, however, in that
7337 case, the insn should have been moved out of the loop. */
7339 if (loop_info->num_mem_sets == 1)
7341 struct induction *v;
7343 /* If we could prove that each of the memory locations
7344 written to was different, then we could reverse the
7345 store -- but we don't presently have any way of
7346 knowing that. */
7347 reversible_mem_store = 0;
7349 /* If the store depends on a register that is set after the
7350 store, it depends on the initial value, and is thus not
7351 reversible. */
7352 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7354 if (v->giv_type == DEST_REG
7355 && reg_mentioned_p (v->dest_reg,
7356 PATTERN (loop_info->first_loop_store_insn))
7357 && loop_insn_first_p (loop_info->first_loop_store_insn,
7358 v->insn))
7359 reversible_mem_store = 0;
7363 else
7364 return 0;
7366 /* This code only acts for innermost loops. Also it simplifies
7367 the memory address check by only reversing loops with
7368 zero or one memory access.
7369 Two memory accesses could involve parts of the same array,
7370 and that can't be reversed.
7371 If the biv is used only for counting, then we don't need to worry
7372 about all these things. */
7374 if ((num_nonfixed_reads <= 1
7375 && ! loop_info->has_nonconst_call
7376 && ! loop_info->has_volatile
7377 && reversible_mem_store
7378 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
7379 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
7380 && (bl == ivs->list && bl->next == 0))
7381 || no_use_except_counting)
7383 rtx tem;
7385 /* Loop can be reversed. */
7386 if (loop_dump_stream)
7387 fprintf (loop_dump_stream, "Can reverse loop\n");
7389 /* Now check other conditions:
7391 The increment must be a constant, as must the initial value,
7392 and the comparison code must be LT.
7394 This test can probably be improved since +/- 1 in the constant
7395 can be obtained by changing LT to LE and vice versa; this is
7396 confusing. */
7398 if (comparison
7399 /* for constants, LE gets turned into LT */
7400 && (GET_CODE (comparison) == LT
7401 || (GET_CODE (comparison) == LE
7402 && no_use_except_counting)))
7404 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7405 rtx initial_value, comparison_value;
7406 int nonneg = 0;
7407 enum rtx_code cmp_code;
7408 int comparison_const_width;
7409 unsigned HOST_WIDE_INT comparison_sign_mask;
7411 add_val = INTVAL (bl->biv->add_val);
7412 comparison_value = XEXP (comparison, 1);
7413 if (GET_MODE (comparison_value) == VOIDmode)
7414 comparison_const_width
7415 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7416 else
7417 comparison_const_width
7418 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7419 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7420 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7421 comparison_sign_mask
7422 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
7424 /* If the comparison value is not a loop invariant, then we
7425 cannot reverse this loop.
7427 ??? If the insns which initialize the comparison value as
7428 a whole compute an invariant result, then we could move
7429 them out of the loop and proceed with loop reversal. */
7430 if (! loop_invariant_p (loop, comparison_value))
7431 return 0;
7433 if (GET_CODE (comparison_value) == CONST_INT)
7434 comparison_val = INTVAL (comparison_value);
7435 initial_value = bl->initial_value;
7437 /* Normalize the initial value if it is an integer and
7438 has no other use except as a counter. This will allow
7439 a few more loops to be reversed. */
7440 if (no_use_except_counting
7441 && GET_CODE (comparison_value) == CONST_INT
7442 && GET_CODE (initial_value) == CONST_INT)
7444 comparison_val = comparison_val - INTVAL (bl->initial_value);
7445 /* The code below requires comparison_val to be a multiple
7446 of add_val in order to do the loop reversal, so
7447 round up comparison_val to a multiple of add_val.
7448 Since comparison_value is constant, we know that the
7449 current comparison code is LT. */
7450 comparison_val = comparison_val + add_val - 1;
7451 comparison_val
7452 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
7453 /* We postpone overflow checks for COMPARISON_VAL here;
7454 even if there is an overflow, we might still be able to
7455 reverse the loop, if converting the loop exit test to
7456 NE is possible. */
7457 initial_value = const0_rtx;
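/* Worked example with hypothetical values: for initial_value 5,
   comparison_val 17 and add_val 4, we get 17 - 5 = 12, already a
   multiple of 4; for comparison_val 18 we would round 13 up to 16.
   Either way the loop is renormalized to count from 0 in steps of
   add_val, which is what the reversal code below expects.  */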
7460 /* First check if we can do a vanilla loop reversal. */
7461 if (initial_value == const0_rtx
7462 /* If we have a decrement_and_branch_on_count,
7463 prefer the NE test, since this will allow that
7464 instruction to be generated. Note that we must
7465 use a vanilla loop reversal if the biv is used to
7466 calculate a giv or has a non-counting use. */
7467 #if ! defined (HAVE_decrement_and_branch_until_zero) \
7468 && defined (HAVE_decrement_and_branch_on_count)
7469 && (! (add_val == 1 && loop->vtop
7470 && (bl->biv_count == 0
7471 || no_use_except_counting)))
7472 #endif
7473 && GET_CODE (comparison_value) == CONST_INT
7474 /* Now do postponed overflow checks on COMPARISON_VAL. */
7475 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7476 & comparison_sign_mask))
7478 /* Register will always be nonnegative, with value
7479 0 on last iteration */
7480 add_adjust = add_val;
7481 nonneg = 1;
7482 cmp_code = GE;
7484 else if (add_val == 1 && loop->vtop
7485 && (bl->biv_count == 0
7486 || no_use_except_counting))
7488 add_adjust = 0;
7489 cmp_code = NE;
7491 else
7492 return 0;
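/* The two forms, sketched on the hypothetical loop
   `for (i = 0; i < 16; i += 4)' (four iterations): with cmp_code GE
   the reversed biv starts at 12 and the loop branches back while the
   decremented value is >= 0 -- the REG_NONNEG form that suits
   decrement_and_branch_until_zero targets; with cmp_code NE it
   starts at 16 and exits once the decremented value reaches 0.
   Both run the body four times.  */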
7494 if (GET_CODE (comparison) == LE)
7495 add_adjust -= add_val;
7497 /* If the initial value is not zero, or if the comparison
7498 value is not an exact multiple of the increment, then we
7499 cannot reverse this loop. */
7500 if (initial_value == const0_rtx
7501 && GET_CODE (comparison_value) == CONST_INT)
7503 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7504 return 0;
7506 else
7508 if (! no_use_except_counting || add_val != 1)
7509 return 0;
7512 final_value = comparison_value;
7514 /* Reset these in case we normalized the initial value
7515 and comparison value above. */
7516 if (GET_CODE (comparison_value) == CONST_INT
7517 && GET_CODE (initial_value) == CONST_INT)
7519 comparison_value = GEN_INT (comparison_val);
7520 final_value
7521 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7523 bl->initial_value = initial_value;
7525 /* Save some info needed to produce the new insns. */
7526 reg = bl->biv->dest_reg;
7527 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7528 if (jump_label == pc_rtx)
7529 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7530 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
7532 /* Set start_value; if this is not a CONST_INT, we need
7533 to generate a SUB.
7534 Initialize biv to start_value before loop start.
7535 The old initializing insn will be deleted as a
7536 dead store by flow.c. */
7537 if (initial_value == const0_rtx
7538 && GET_CODE (comparison_value) == CONST_INT)
7540 start_value = GEN_INT (comparison_val - add_adjust);
7541 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
7543 else if (GET_CODE (initial_value) == CONST_INT)
7545 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7546 enum machine_mode mode = GET_MODE (reg);
7547 enum insn_code icode
7548 = add_optab->handlers[(int) mode].insn_code;
7550 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7551 || ! ((*insn_data[icode].operand[1].predicate)
7552 (comparison_value, mode))
7553 || ! ((*insn_data[icode].operand[2].predicate)
7554 (offset, mode)))
7555 return 0;
7556 start_value
7557 = gen_rtx_PLUS (mode, comparison_value, offset);
7558 loop_insn_hoist (loop, (GEN_FCN (icode)
7559 (reg, comparison_value, offset)));
7560 if (GET_CODE (comparison) == LE)
7561 final_value = gen_rtx_PLUS (mode, comparison_value,
7562 GEN_INT (add_val));
7564 else if (! add_adjust)
7566 enum machine_mode mode = GET_MODE (reg);
7567 enum insn_code icode
7568 = sub_optab->handlers[(int) mode].insn_code;
7569 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7570 || ! ((*insn_data[icode].operand[1].predicate)
7571 (comparison_value, mode))
7572 || ! ((*insn_data[icode].operand[2].predicate)
7573 (initial_value, mode)))
7574 return 0;
7575 start_value
7576 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7577 loop_insn_hoist (loop, (GEN_FCN (icode)
7578 (reg, comparison_value,
7579 initial_value)));
7581 else
7582 /* We could handle the other cases too, but it'll be
7583 better to have a testcase first. */
7584 return 0;
7586 /* We may not have a single insn which can increment a reg, so
7587 create a sequence to hold all the insns from expand_inc. */
7588 start_sequence ();
7589 expand_inc (reg, new_add_val);
7590 tem = gen_sequence ();
7591 end_sequence ();
7593 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
7594 delete_insn (bl->biv->insn);
7596 /* Update biv info to reflect its new status. */
7597 bl->biv->insn = p;
7598 bl->initial_value = start_value;
7599 bl->biv->add_val = new_add_val;
7601 /* Update loop info. */
7602 loop_info->initial_value = reg;
7603 loop_info->initial_equiv_value = reg;
7604 loop_info->final_value = const0_rtx;
7605 loop_info->final_equiv_value = const0_rtx;
7606 loop_info->comparison_value = const0_rtx;
7607 loop_info->comparison_code = cmp_code;
7608 loop_info->increment = new_add_val;
7610 /* Increment LABEL_NUSES so that delete_insn will
7611 not delete the label. */
7612 LABEL_NUSES (XEXP (jump_label, 0))++;
7614 /* Emit an insn after the end of the loop to set the biv's
7615 proper exit value if it is used anywhere outside the loop. */
7616 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7617 || ! bl->init_insn
7618 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7619 loop_insn_sink (loop, gen_move_insn (reg, final_value));
7621 /* Delete compare/branch at end of loop. */
7622 delete_insn (PREV_INSN (loop_end));
7623 if (compare_and_branch == 2)
7624 delete_insn (first_compare);
7626 /* Add new compare/branch insn at end of loop. */
7627 start_sequence ();
7628 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
7629 GET_MODE (reg), 0, 0,
7630 XEXP (jump_label, 0));
7631 tem = gen_sequence ();
7632 end_sequence ();
7633 emit_jump_insn_before (tem, loop_end);
7635 for (tem = PREV_INSN (loop_end);
7636 tem && GET_CODE (tem) != JUMP_INSN;
7637 tem = PREV_INSN (tem))
7640 if (tem)
7641 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7643 if (nonneg)
7645 if (tem)
7647 /* Increment of LABEL_NUSES done above. */
7648 /* Register is now always nonnegative,
7649 so add REG_NONNEG note to the branch. */
7650 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
7651 REG_NOTES (tem));
7653 bl->nonneg = 1;
7656 /* No insn may reference both the reversed and another biv or it
7657 will fail (see comment near the top of the loop reversal
7658 code).
7659 Earlier on, we have verified that the biv has no use except
7660 counting, or it is the only biv in this function.
7661 However, the code that computes no_use_except_counting does
7662 not verify reg notes. It's possible to have an insn that
7663 references another biv, and has a REG_EQUAL note with an
7664 expression based on the reversed biv. To avoid this case,
7665 remove all REG_EQUAL notes based on the reversed biv
7666 here. */
7667 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7668 if (INSN_P (p))
7670 rtx *pnote;
7671 rtx set = single_set (p);
7672 /* If this is a set of a GIV based on the reversed biv, any
7673 REG_EQUAL notes should still be correct. */
7674 if (! set
7675 || GET_CODE (SET_DEST (set)) != REG
7676 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
7677 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
7678 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
7679 for (pnote = &REG_NOTES (p); *pnote;)
7681 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
7682 && reg_mentioned_p (regno_reg_rtx[bl->regno],
7683 XEXP (*pnote, 0)))
7684 *pnote = XEXP (*pnote, 1);
7685 else
7686 pnote = &XEXP (*pnote, 1);
7690 /* Mark that this biv has been reversed. Each giv which depends
7691 on this biv, and which is also live past the end of the loop
7692 will have to be fixed up. */
7694 bl->reversed = 1;
7696 if (loop_dump_stream)
7698 fprintf (loop_dump_stream, "Reversed loop");
7699 if (bl->nonneg)
7700 fprintf (loop_dump_stream, " and added reg_nonneg\n");
7701 else
7702 fprintf (loop_dump_stream, "\n");
7705 return 1;
7710 return 0;
7713 /* Verify whether the biv BL appears to be eliminable,
7714 based on the insns in the loop that refer to it.
7716 If ELIMINATE_P is non-zero, actually do the elimination.
7718 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7719 determine whether invariant insns should be placed inside or at the
7720 start of the loop. */
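/* For instance (a hypothetical loop), if the index biv `i' in

       for (i = 0; i < n; i++)
         p[i] = 0;

   survives strength reduction only in the exit test, the test can be
   rewritten against the reduced address giv -- comparing the running
   pointer with `p + n' -- and the biv itself eliminated.  */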
7722 static int
7723 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
7724 const struct loop *loop;
7725 struct iv_class *bl;
7726 int eliminate_p;
7727 int threshold, insn_count;
7729 struct loop_ivs *ivs = LOOP_IVS (loop);
7730 rtx reg = bl->biv->dest_reg;
7731 rtx p;
7733 /* Scan all insns in the loop, stopping if we find one that uses the
7734 biv in a way that we cannot eliminate. */
7736 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
7738 enum rtx_code code = GET_CODE (p);
7739 basic_block where_bb = 0;
7740 rtx where_insn = threshold >= insn_count ? 0 : p;
7742 /* If this is a libcall that sets a giv, skip ahead to its end. */
7743 if (GET_RTX_CLASS (code) == 'i')
7745 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
7747 if (note)
7749 rtx last = XEXP (note, 0);
7750 rtx set = single_set (last);
7752 if (set && GET_CODE (SET_DEST (set)) == REG)
7754 unsigned int regno = REGNO (SET_DEST (set));
7756 if (regno < ivs->n_regs
7757 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
7758 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
7759 p = last;
7763 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7764 && reg_mentioned_p (reg, PATTERN (p))
7765 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
7766 eliminate_p, where_bb, where_insn))
7768 if (loop_dump_stream)
7769 fprintf (loop_dump_stream,
7770 "Cannot eliminate biv %d: biv used in insn %d.\n",
7771 bl->regno, INSN_UID (p));
7772 break;
7776 if (p == loop->end)
7778 if (loop_dump_stream)
7779 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7780 bl->regno, eliminate_p ? "was" : "can be");
7781 return 1;
7784 return 0;
7787 /* INSN and REFERENCE are instructions in the same insn chain.
7788 Return non-zero if INSN is first. */
7790 int
7791 loop_insn_first_p (insn, reference)
7792 rtx insn, reference;
7794 rtx p, q;
7796 for (p = insn, q = reference;;)
7798 /* Start with test for not first so that INSN == REFERENCE yields not
7799 first. */
7800 if (q == insn || ! p)
7801 return 0;
7802 if (p == reference || ! q)
7803 return 1;
7805 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
7806 previous insn, hence the <= comparison below does not work if
7807 P is a note. */
7808 if (INSN_UID (p) < max_uid_for_loop
7809 && INSN_UID (q) < max_uid_for_loop
7810 && GET_CODE (p) != NOTE)
7811 return INSN_LUID (p) <= INSN_LUID (q);
7813 if (INSN_UID (p) >= max_uid_for_loop
7814 || GET_CODE (p) == NOTE)
7815 p = NEXT_INSN (p);
7816 if (INSN_UID (q) >= max_uid_for_loop)
7817 q = NEXT_INSN (q);
7821 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
7822 the offset that we have to take into account due to auto-increment /
7823 giv derivation is zero. */
7824 static int
7825 biv_elimination_giv_has_0_offset (biv, giv, insn)
7826 struct induction *biv, *giv;
7827 rtx insn;
7829 /* If the giv V had the auto-inc address optimization applied
7830 to it, and INSN occurs between the giv insn and the biv
7831 insn, then we'd have to adjust the value used here.
7832 This is rare, so we don't bother to make this possible. */
7833 if (giv->auto_inc_opt
7834 && ((loop_insn_first_p (giv->insn, insn)
7835 && loop_insn_first_p (insn, biv->insn))
7836 || (loop_insn_first_p (biv->insn, insn)
7837 && loop_insn_first_p (insn, giv->insn))))
7838 return 0;
7840 return 1;
7843 /* If BL appears in X (part of the pattern of INSN), see if we can
7844 eliminate its use. If so, return 1. If not, return 0.
7846 If BIV does not appear in X, return 1.
7848 If ELIMINATE_P is non-zero, actually do the elimination.
7849 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
7850 Depending on how many items have been moved out of the loop, it
7851 will either be before INSN (when WHERE_INSN is non-zero) or at the
7852 start of the loop (when WHERE_INSN is zero). */
7854 static int
7855 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
7856 const struct loop *loop;
7857 rtx x, insn;
7858 struct iv_class *bl;
7859 int eliminate_p;
7860 basic_block where_bb;
7861 rtx where_insn;
7863 enum rtx_code code = GET_CODE (x);
7864 rtx reg = bl->biv->dest_reg;
7865 enum machine_mode mode = GET_MODE (reg);
7866 struct induction *v;
7867 rtx arg, tem;
7868 #ifdef HAVE_cc0
7869 rtx new;
7870 #endif
7871 int arg_operand;
7872 const char *fmt;
7873 int i, j;
7875 switch (code)
7877 case REG:
7878 /* If we haven't already been able to do something with this BIV,
7879 we can't eliminate it. */
7880 if (x == reg)
7881 return 0;
7882 return 1;
7884 case SET:
7885 /* If this sets the BIV, it is not a problem. */
7886 if (SET_DEST (x) == reg)
7887 return 1;
7889 /* If this is an insn that defines a giv, it is also ok because
7890 it will go away when the giv is reduced. */
7891 for (v = bl->giv; v; v = v->next_iv)
7892 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7893 return 1;
7895 #ifdef HAVE_cc0
7896 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7898 /* Can replace with any giv that was reduced and
7899 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7900 Require a constant for MULT_VAL, so we know it's nonzero.
7901 ??? We disable this optimization to avoid potential
7902 overflows. */
7904 for (v = bl->giv; v; v = v->next_iv)
7905 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
7906 && v->add_val == const0_rtx
7907 && ! v->ignore && ! v->maybe_dead && v->always_computable
7908 && v->mode == mode
7909 && 0)
7911 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7912 continue;
7914 if (! eliminate_p)
7915 return 1;
7917 /* If the giv has the opposite direction of change,
7918 then reverse the comparison. */
7919 if (INTVAL (v->mult_val) < 0)
7920 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7921 const0_rtx, v->new_reg);
7922 else
7923 new = v->new_reg;
7925 /* We can probably test that giv's reduced reg. */
7926 if (validate_change (insn, &SET_SRC (x), new, 0))
7927 return 1;
7930 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7931 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7932 Require a constant for MULT_VAL, so we know it's nonzero.
7933 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7934 overflow problem. */
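/* A hypothetical instance: if the biv `i' is tested via
   `(set (cc0) (reg i))' and the giv `g = i + base' (with BASE a
   SYMBOL_REF) has been reduced, the test becomes
   `(compare (reg g) (symbol_ref base))', which is zero exactly when
   `i' is; restricting ADD_VAL to pointer-like values is what guards
   against the overflow problem noted above.  */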
7936 for (v = bl->giv; v; v = v->next_iv)
7937 if (GET_CODE (v->mult_val) == CONST_INT
7938 && v->mult_val != const0_rtx
7939 && ! v->ignore && ! v->maybe_dead && v->always_computable
7940 && v->mode == mode
7941 && (GET_CODE (v->add_val) == SYMBOL_REF
7942 || GET_CODE (v->add_val) == LABEL_REF
7943 || GET_CODE (v->add_val) == CONST
7944 || (GET_CODE (v->add_val) == REG
7945 && REG_POINTER (v->add_val))))
7947 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7948 continue;
7950 if (! eliminate_p)
7951 return 1;
7953 /* If the giv has the opposite direction of change,
7954 then reverse the comparison. */
7955 if (INTVAL (v->mult_val) < 0)
7956 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7957 v->new_reg);
7958 else
7959 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7960 copy_rtx (v->add_val));
7962 /* Replace biv with the giv's reduced register. */
7963 update_reg_last_use (v->add_val, insn);
7964 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7965 return 1;
7967 /* Insn doesn't support that constant or invariant. Copy it
7968 into a register (it will be a loop invariant). */
7969 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7971 loop_insn_emit_before (loop, 0, where_insn,
7972 gen_move_insn (tem,
7973 copy_rtx (v->add_val)));
7975 /* Substitute the new register for its invariant value in
7976 the compare expression. */
7977 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7978 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7979 return 1;
7982 #endif
7983 break;
7985 case COMPARE:
7986 case EQ: case NE:
7987 case GT: case GE: case GTU: case GEU:
7988 case LT: case LE: case LTU: case LEU:
7989 /* See if either argument is the biv. */
7990 if (XEXP (x, 0) == reg)
7991 arg = XEXP (x, 1), arg_operand = 1;
7992 else if (XEXP (x, 1) == reg)
7993 arg = XEXP (x, 0), arg_operand = 0;
7994 else
7995 break;
7997 if (CONSTANT_P (arg))
7999 /* First try to replace with any giv that has constant positive
8000 mult_val and constant add_val. We might be able to support
8001 negative mult_val, but it seems complex to do it in general. */
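/* Worked example (hypothetical): for the test `i < 10' and a reduced
   giv with mult_val 4 and add_val BASE, the biv operand becomes the
   giv's new_reg and the other operand becomes 4 * 10 + BASE --
   folded directly below when everything is a CONST_INT, otherwise
   computed into a fresh register before this insn.  */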
8003 for (v = bl->giv; v; v = v->next_iv)
8004 if (GET_CODE (v->mult_val) == CONST_INT
8005 && INTVAL (v->mult_val) > 0
8006 && (GET_CODE (v->add_val) == SYMBOL_REF
8007 || GET_CODE (v->add_val) == LABEL_REF
8008 || GET_CODE (v->add_val) == CONST
8009 || (GET_CODE (v->add_val) == REG
8010 && REG_POINTER (v->add_val)))
8011 && ! v->ignore && ! v->maybe_dead && v->always_computable
8012 && v->mode == mode)
8014 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8015 continue;
8017 if (! eliminate_p)
8018 return 1;
8020 /* Replace biv with the giv's reduced reg. */
8021 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8023 /* If all constants are actually constant integers and
8024 the derived constant can be directly placed in the COMPARE,
8025 do so. */
8026 if (GET_CODE (arg) == CONST_INT
8027 && GET_CODE (v->mult_val) == CONST_INT
8028 && GET_CODE (v->add_val) == CONST_INT)
8030 validate_change (insn, &XEXP (x, arg_operand),
8031 GEN_INT (INTVAL (arg)
8032 * INTVAL (v->mult_val)
8033 + INTVAL (v->add_val)), 1);
8035 else
8037 /* Otherwise, load it into a register. */
8038 tem = gen_reg_rtx (mode);
8039 loop_iv_add_mult_emit_before (loop, arg,
8040 v->mult_val, v->add_val,
8041 tem, where_bb, where_insn);
8042 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8044 if (apply_change_group ())
8045 return 1;
8048 /* Look for giv with positive constant mult_val and nonconst add_val.
8049 Insert insns to calculate new compare value.
8050 ??? Turn this off due to possible overflow. */
8052 for (v = bl->giv; v; v = v->next_iv)
8053 if (GET_CODE (v->mult_val) == CONST_INT
8054 && INTVAL (v->mult_val) > 0
8055 && ! v->ignore && ! v->maybe_dead && v->always_computable
8056 && v->mode == mode
8057 && 0)
8059 rtx tem;
8061 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8062 continue;
8064 if (! eliminate_p)
8065 return 1;
8067 tem = gen_reg_rtx (mode);
8069 /* Replace biv with giv's reduced register. */
8070 validate_change (insn, &XEXP (x, 1 - arg_operand),
8071 v->new_reg, 1);
8073 /* Compute value to compare against. */
8074 loop_iv_add_mult_emit_before (loop, arg,
8075 v->mult_val, v->add_val,
8076 tem, where_bb, where_insn);
8077 /* Use it in this insn. */
8078 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8079 if (apply_change_group ())
8080 return 1;
8083 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8085 if (loop_invariant_p (loop, arg) == 1)
8087 /* Look for giv with constant positive mult_val and nonconst
8088 add_val. Insert insns to compute new compare value.
8089 ??? Turn this off due to possible overflow. */
8091 for (v = bl->giv; v; v = v->next_iv)
8092 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8093 && ! v->ignore && ! v->maybe_dead && v->always_computable
8094 && v->mode == mode
8095 && 0)
8097 rtx tem;
8099 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8100 continue;
8102 if (! eliminate_p)
8103 return 1;
8105 tem = gen_reg_rtx (mode);
8107 /* Replace biv with giv's reduced register. */
8108 validate_change (insn, &XEXP (x, 1 - arg_operand),
8109 v->new_reg, 1);
8111 /* Compute value to compare against. */
8112 loop_iv_add_mult_emit_before (loop, arg,
8113 v->mult_val, v->add_val,
8114 tem, where_bb, where_insn);
8115 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8116 if (apply_change_group ())
8117 return 1;
8121 /* This code has problems. Basically, when checking whether we
8122 will eliminate BL, you can't know whether a particular giv
8123 of ARG will be reduced. If it isn't going to be reduced,
8124 we can't eliminate BL. We can try forcing it to be reduced,
8125 but that can generate poor code.
8127 The problem is that the benefit of reducing TV, below, should
8128 be increased if BL can actually be eliminated, but this means
8129 we might have to do a topological sort of the order in which
8130 we try to process biv. It doesn't seem worthwhile to do
8131 this sort of thing now. */
8133 #if 0
8134 /* Otherwise the reg compared with had better be a biv. */
8135 if (GET_CODE (arg) != REG
8136 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8137 return 0;
8139 /* Look for a pair of givs, one for each biv,
8140 with identical coefficients. */
8141 for (v = bl->giv; v; v = v->next_iv)
8143 struct induction *tv;
8145 if (v->ignore || v->maybe_dead || v->mode != mode)
8146 continue;
8148 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8149 tv = tv->next_iv)
8150 if (! tv->ignore && ! tv->maybe_dead
8151 && rtx_equal_p (tv->mult_val, v->mult_val)
8152 && rtx_equal_p (tv->add_val, v->add_val)
8153 && tv->mode == mode)
8155 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8156 continue;
8158 if (! eliminate_p)
8159 return 1;
8161 /* Replace biv with its giv's reduced reg. */
8162 XEXP (x, 1 - arg_operand) = v->new_reg;
8163 /* Replace other operand with the other giv's
8164 reduced reg. */
8165 XEXP (x, arg_operand) = tv->new_reg;
8166 return 1;
8169 #endif
8172 /* If we get here, the biv can't be eliminated. */
8173 return 0;
8175 case MEM:
8176 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8177 biv is used in it, since it will be replaced. */
8178 for (v = bl->giv; v; v = v->next_iv)
8179 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8180 return 1;
8181 break;
8183 default:
8184 break;
8187 /* See if any subexpression fails elimination. */
8188 fmt = GET_RTX_FORMAT (code);
8189 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8191 switch (fmt[i])
8193 case 'e':
8194 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8195 eliminate_p, where_bb, where_insn))
8196 return 0;
8197 break;
8199 case 'E':
8200 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8201 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8202 eliminate_p, where_bb, where_insn))
8203 return 0;
8204 break;
8208 return 1;
8211 /* Return nonzero if the last use of REG
8212 is in an insn following INSN in the same basic block. */
8214 static int
8215 last_use_this_basic_block (reg, insn)
8216 rtx reg;
8217 rtx insn;
8219 rtx n;
8220 for (n = insn;
8221 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8222 n = NEXT_INSN (n))
8224 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8225 return 1;
8227 return 0;
8230 /* Called via `note_stores' to record the initial value of a biv. Here we
8231 just record the location of the set and process it later. */
8233 static void
8234 record_initial (dest, set, data)
8235 rtx dest;
8236 rtx set;
8237 void *data ATTRIBUTE_UNUSED;
8239 struct loop_ivs *ivs = (struct loop_ivs *) data;
8240 struct iv_class *bl;
8242 if (GET_CODE (dest) != REG
8243 || REGNO (dest) >= ivs->n_regs
8244 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8245 return;
8247 bl = REG_IV_CLASS (ivs, REGNO (dest));
8249 /* If this is the first set found, record it. */
8250 if (bl->init_insn == 0)
8252 bl->init_insn = note_insn;
8253 bl->init_set = set;
8257 /* If any of the registers in X are "old" and currently have a last use earlier
8258 than INSN, update them to have a last use of INSN. Their actual last use
8259 will be the previous insn but it will not have a valid uid_luid so we can't
8260 use it. X must be a source expression only. */
8262 static void
8263 update_reg_last_use (x, insn)
8264 rtx x;
8265 rtx insn;
8267 /* Check for the case where INSN does not have a valid luid. In this case,
8268 there is no need to modify the regno_last_uid, as this can only happen
8269 when code is inserted after the loop_end to set a pseudo's final value,
8270 and hence this insn will never be the last use of x.
8271 ???? This comment is not correct. See for example loop_givs_reduce.
8272 This may insert an insn before another new insn. */
8273 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8274 && INSN_UID (insn) < max_uid_for_loop
8275 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8277 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8279 else
8281 register int i, j;
8282 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8283 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8285 if (fmt[i] == 'e')
8286 update_reg_last_use (XEXP (x, i), insn);
8287 else if (fmt[i] == 'E')
8288 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8289 update_reg_last_use (XVECEXP (x, i, j), insn);
8294 /* Given an insn INSN and condition COND, return the condition in a
8295 canonical form to simplify testing by callers. Specifically:
8297 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8298 (2) Both operands will be machine operands; (cc0) will have been replaced.
8299 (3) If an operand is a constant, it will be the second operand.
8300 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8301 for GE, GEU, and LEU.
8303 If the condition cannot be understood, or is an inequality floating-point
8304 comparison which needs to be reversed, 0 will be returned.
8306 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
8308 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8309 insn used in locating the condition was found. If a replacement test
8310 of the condition is desired, it should be placed in front of that
8311 insn and we will be sure that the inputs are still valid.
8313 If WANT_REG is non-zero, we wish the condition to be relative to that
8314 register, if possible. Therefore, do not canonicalize the condition
8315 further. */
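/* Hypothetical examples of the canonical form:

       (gtu (const_int 4) (reg r))  ->  (ltu (reg r) (const_int 4))
       (le (reg r) (const_int 4))   ->  (lt (reg r) (const_int 5))
       (geu (reg r) (const_int 1))  ->  (gtu (reg r) (const_int 0))

   and a comparison against (cc0) is traced back to the compare insn
   that set it.  */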
8317 rtx
8318 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8319 rtx insn;
8320 rtx cond;
8321 int reverse;
8322 rtx *earliest;
8323 rtx want_reg;
8325 enum rtx_code code;
8326 rtx prev = insn;
8327 rtx set;
8328 rtx tem;
8329 rtx op0, op1;
8330 int reverse_code = 0;
8331 int did_reverse_condition = 0;
8332 enum machine_mode mode;
8334 code = GET_CODE (cond);
8335 mode = GET_MODE (cond);
8336 op0 = XEXP (cond, 0);
8337 op1 = XEXP (cond, 1);
8339 if (reverse)
8341 code = reverse_condition (code);
8342 did_reverse_condition ^= 1;
8345 if (earliest)
8346 *earliest = insn;
8348 /* If we are comparing a register with zero, see if the register is set
8349 in the previous insn to a COMPARE or a comparison operation. Perform
8350 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8351 in cse.c */
8353 while (GET_RTX_CLASS (code) == '<'
8354 && op1 == CONST0_RTX (GET_MODE (op0))
8355 && op0 != want_reg)
8357 /* Set non-zero when we find something of interest. */
8358 rtx x = 0;
8360 #ifdef HAVE_cc0
8361 /* If comparison with cc0, import actual comparison from compare
8362 insn. */
8363 if (op0 == cc0_rtx)
8365 if ((prev = prev_nonnote_insn (prev)) == 0
8366 || GET_CODE (prev) != INSN
8367 || (set = single_set (prev)) == 0
8368 || SET_DEST (set) != cc0_rtx)
8369 return 0;
8371 op0 = SET_SRC (set);
8372 op1 = CONST0_RTX (GET_MODE (op0));
8373 if (earliest)
8374 *earliest = prev;
8376 #endif
8378 /* If this is a COMPARE, pick up the two things being compared. */
8379 if (GET_CODE (op0) == COMPARE)
8381 op1 = XEXP (op0, 1);
8382 op0 = XEXP (op0, 0);
8383 continue;
8385 else if (GET_CODE (op0) != REG)
8386 break;
8388 /* Go back to the previous insn. Stop if it is not an INSN. We also
8389 stop if it isn't a single set or if it has a REG_INC note because
8390 we don't want to bother dealing with it. */
8392 if ((prev = prev_nonnote_insn (prev)) == 0
8393 || GET_CODE (prev) != INSN
8394 || FIND_REG_INC_NOTE (prev, 0)
8395 || (set = single_set (prev)) == 0)
8396 break;
8398 /* If this is setting OP0, get what it sets it to if it looks
8399 relevant. */
8400 if (rtx_equal_p (SET_DEST (set), op0))
8402 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8404 /* ??? We may not combine comparisons done in a CCmode with
8405 comparisons not done in a CCmode. This is to aid targets
8406 like Alpha that have an IEEE compliant EQ instruction, and
8407 a non-IEEE compliant BEQ instruction. The use of CCmode is
8408 actually artificial, simply to prevent the combination, but
8409 should not affect other platforms.
8411 However, we must allow VOIDmode comparisons to match either
8412 CCmode or non-CCmode comparison, because some ports have
8413 modeless comparisons inside branch patterns.
8415 ??? This mode check should perhaps look more like the mode check
8416 in simplify_comparison in combine. */
8418 if ((GET_CODE (SET_SRC (set)) == COMPARE
8419 || (((code == NE
8420 || (code == LT
8421 && GET_MODE_CLASS (inner_mode) == MODE_INT
8422 && (GET_MODE_BITSIZE (inner_mode)
8423 <= HOST_BITS_PER_WIDE_INT)
8424 && (STORE_FLAG_VALUE
8425 & ((HOST_WIDE_INT) 1
8426 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8427 #ifdef FLOAT_STORE_FLAG_VALUE
8428 || (code == LT
8429 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8430 && (REAL_VALUE_NEGATIVE
8431 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8432 #endif
8434 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
8435 && (((GET_MODE_CLASS (mode) == MODE_CC)
8436 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8437 || mode == VOIDmode || inner_mode == VOIDmode))
8438 x = SET_SRC (set);
8439 else if (((code == EQ
8440 || (code == GE
8441 && (GET_MODE_BITSIZE (inner_mode)
8442 <= HOST_BITS_PER_WIDE_INT)
8443 && GET_MODE_CLASS (inner_mode) == MODE_INT
8444 && (STORE_FLAG_VALUE
8445 & ((HOST_WIDE_INT) 1
8446 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8447 #ifdef FLOAT_STORE_FLAG_VALUE
8448 || (code == GE
8449 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8450 && (REAL_VALUE_NEGATIVE
8451 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8452 #endif
8454 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
8455 && (((GET_MODE_CLASS (mode) == MODE_CC)
8456 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8457 || mode == VOIDmode || inner_mode == VOIDmode))
8460 /* We might have reversed a LT to get a GE here. But this wasn't
8461 actually the comparison of data, so we don't flag that we
8462 have had to reverse the condition. */
8463 did_reverse_condition ^= 1;
8464 reverse_code = 1;
8465 x = SET_SRC (set);
8467 else
8468 break;
8471 else if (reg_set_p (op0, prev))
8472 /* If this sets OP0, but not directly, we have to give up. */
8473 break;
8475 if (x)
8477 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
8478 code = GET_CODE (x);
8479 if (reverse_code)
8481 code = reverse_condition (code);
8482 if (code == UNKNOWN)
8483 return 0;
8484 did_reverse_condition ^= 1;
8485 reverse_code = 0;
8488 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
8489 if (earliest)
8490 *earliest = prev;
8494 /* If constant is first, put it last. */
8495 if (CONSTANT_P (op0))
8496 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
8498 /* If OP0 is the result of a comparison, we weren't able to find what
8499 was really being compared, so fail. */
8500 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8501 return 0;
8503 /* Canonicalize any ordered comparison with integers involving equality
8504 if we can do computations in the relevant mode and we do not
8505 overflow. */
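/* E.g. (le (reg:QI r) (const_int 4)) becomes LT of 5, while
   (le (reg:QI r) (const_int 127)) is left alone because 128 is not
   representable in QImode; likewise (geu ... 0) stays GEU, as no
   unsigned value is below zero.  */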
8507 if (GET_CODE (op1) == CONST_INT
8508 && GET_MODE (op0) != VOIDmode
8509 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
8511 HOST_WIDE_INT const_val = INTVAL (op1);
8512 unsigned HOST_WIDE_INT uconst_val = const_val;
8513 unsigned HOST_WIDE_INT max_val
8514 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
8516 switch (code)
8518 case LE:
8519 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
8520 code = LT, op1 = GEN_INT (const_val + 1);
8521 break;
8523 /* When cross-compiling, const_val might be sign-extended from
8524 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
8525 case GE:
8526 if ((HOST_WIDE_INT) (const_val & max_val)
8527 != (((HOST_WIDE_INT) 1
8528 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
8529 code = GT, op1 = GEN_INT (const_val - 1);
8530 break;
8532 case LEU:
8533 if (uconst_val < max_val)
8534 code = LTU, op1 = GEN_INT (uconst_val + 1);
8535 break;
8537 case GEU:
8538 if (uconst_val != 0)
8539 code = GTU, op1 = GEN_INT (uconst_val - 1);
8540 break;
8542 default:
8543 break;
8547 /* If this was floating-point and we reversed anything other than an
8548 EQ or NE or (UN)ORDERED, return zero. */
8549 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
8550 && did_reverse_condition
8551 && code != NE && code != EQ && code != UNORDERED && code != ORDERED
8552 && ! flag_fast_math
8553 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8554 return 0;
8556 #ifdef HAVE_cc0
8557 /* Never return CC0; return zero instead. */
8558 if (op0 == cc0_rtx)
8559 return 0;
8560 #endif
8562 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
8565 /* Given a jump insn JUMP, return the condition that will cause it to branch
8566 to its JUMP_LABEL. If the condition cannot be understood, or is an
8567 inequality floating-point comparison which needs to be reversed, 0 will
8568 be returned.
8570 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8571 insn used in locating the condition was found. If a replacement test
8572 of the condition is desired, it should be placed in front of that
8573 insn and we will be sure that the inputs are still valid. */
8575 rtx
8576 get_condition (jump, earliest)
8577 rtx jump;
8578 rtx *earliest;
8580 rtx cond;
8581 int reverse;
8582 rtx set;
8584 /* If this is not a standard conditional jump, we can't parse it. */
8585 if (GET_CODE (jump) != JUMP_INSN
8586 || ! any_condjump_p (jump))
8587 return 0;
8588 set = pc_set (jump);
8590 cond = XEXP (SET_SRC (set), 0);
8592 /* If this branches to JUMP_LABEL when the condition is false, reverse
8593 the condition. */
8594 reverse
8595 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
8596 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
8598 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
8601 /* Similar to above routine, except that we also put an invariant last
8602 unless both operands are invariants. */
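/* E.g. (hypothetical) a condition returned as `(gt (reg n) (reg i))',
   where `n' is loop-invariant and `i' is not, is swapped to
   `(lt (reg i) (reg n))' so callers can assume the invariant bound
   is the second operand.  */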
8604 rtx
8605 get_condition_for_loop (loop, x)
8606 const struct loop *loop;
8607 rtx x;
8609 rtx comparison = get_condition (x, NULL_PTR);
8611 if (comparison == 0
8612 || ! loop_invariant_p (loop, XEXP (comparison, 0))
8613 || loop_invariant_p (loop, XEXP (comparison, 1)))
8614 return comparison;
8616 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
8617 XEXP (comparison, 1), XEXP (comparison, 0));
8620 /* Scan the function and determine whether it has indirect (computed) jumps.
8622 This is taken mostly from flow.c; similar code exists elsewhere
8623 in the compiler. It may be useful to put this into rtlanal.c. */
8624 static int
8625 indirect_jump_in_function_p (start)
8626 rtx start;
8628 rtx insn;
8630 for (insn = start; insn; insn = NEXT_INSN (insn))
8631 if (computed_jump_p (insn))
8632 return 1;
8634 return 0;
8637 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8638 documentation for LOOP_MEMS for the definition of `appropriate'.
8639 This function is called from prescan_loop via for_each_rtx. */
8641 static int
8642 insert_loop_mem (mem, data)
8643 rtx *mem;
8644 void *data ATTRIBUTE_UNUSED;
8646 struct loop_info *loop_info = data;
8647 int i;
8648 rtx m = *mem;
8650 if (m == NULL_RTX)
8651 return 0;
8653 switch (GET_CODE (m))
8655 case MEM:
8656 break;
8658 case CLOBBER:
8659 /* We're not interested in MEMs that are only clobbered. */
8660 return -1;
8662 case CONST_DOUBLE:
8663 /* We're not interested in the MEM associated with a
8664 CONST_DOUBLE, so there's no need to traverse into this. */
8665 return -1;
8667 case EXPR_LIST:
8668 /* We're not interested in any MEMs that only appear in notes. */
8669 return -1;
8671 default:
8672 /* This is not a MEM. */
8673 return 0;
8676 /* See if we've already seen this MEM. */
8677 for (i = 0; i < loop_info->mems_idx; ++i)
8678 if (rtx_equal_p (m, loop_info->mems[i].mem))
8680 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
8681 /* The modes of the two memory accesses are different. If
8682 this happens, something tricky is going on, and we just
8683 don't optimize accesses to this MEM. */
8684 loop_info->mems[i].optimize = 0;
8686 return 0;
8689 /* Resize the array, if necessary. */
8690 if (loop_info->mems_idx == loop_info->mems_allocated)
8692 if (loop_info->mems_allocated != 0)
8693 loop_info->mems_allocated *= 2;
8694 else
8695 loop_info->mems_allocated = 32;
8697 loop_info->mems = (loop_mem_info *)
8698 xrealloc (loop_info->mems,
8699 loop_info->mems_allocated * sizeof (loop_mem_info));
8702 /* Actually insert the MEM. */
8703 loop_info->mems[loop_info->mems_idx].mem = m;
8704 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8705 because we can't put it in a register. We still store it in the
8706 table, though, so that if we see the same address later, but in a
8707 non-BLK mode, we'll not think we can optimize it at that point. */
8708 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
8709 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
8710 ++loop_info->mems_idx;
8712 return 0;
8716 /* Allocate REGS->ARRAY or reallocate it if it is too small.
8718 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
8719 register that is modified by an insn between FROM and TO. If the
8720 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
8721 more, stop incrementing it, to avoid overflow.
8723 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
8724 register I is used, if it is only used once. Otherwise, it is set
8725 to 0 (for no uses) or const0_rtx for more than one use. This
8726 parameter may be zero, in which case this processing is not done.
8728 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
8729 optimize register I. */
8731 static void
8732 loop_regs_scan (loop, extra_size)
8733 const struct loop *loop;
8734 int extra_size;
8736 struct loop_regs *regs = LOOP_REGS (loop);
8737 int old_nregs;
8738 /* last_set[n] is nonzero iff reg n has been set in the current
8739 basic block. In that case, it is the insn that last set reg n. */
8740 rtx *last_set;
8741 rtx insn;
8742 int i;
8744 old_nregs = regs->num;
8745 regs->num = max_reg_num ();
8747 /* Grow the regs array if not allocated or too small. */
8748 if (regs->num >= regs->size)
8750 regs->size = regs->num + extra_size;
8752 regs->array = (struct loop_reg *)
8753 xrealloc (regs->array, regs->size * sizeof (*regs->array));
8755 /* Zero the new elements. */
8756 memset (regs->array + old_nregs, 0,
8757 (regs->size - old_nregs) * sizeof (*regs->array));
8760 /* Clear previously scanned fields but do not clear n_times_set. */
8761 for (i = 0; i < old_nregs; i++)
8763 regs->array[i].set_in_loop = 0;
8764 regs->array[i].may_not_optimize = 0;
8765 regs->array[i].single_usage = NULL_RTX;
8768 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
8770 /* Scan the loop, recording register usage. */
8771 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8772 insn = NEXT_INSN (insn))
8774 if (INSN_P (insn))
8776 /* Record registers that have exactly one use. */
8777 find_single_use_in_loop (regs, insn, PATTERN (insn));
8779 /* Include uses in REG_EQUAL notes. */
8780 if (REG_NOTES (insn))
8781 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
8783 if (GET_CODE (PATTERN (insn)) == SET
8784 || GET_CODE (PATTERN (insn)) == CLOBBER)
8785 count_one_set (regs, insn, PATTERN (insn), last_set);
8786 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8788 register int i;
8789 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8790 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
8791 last_set);
8795 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
8796 memset (last_set, 0, regs->num * sizeof (rtx));
8799 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8801 regs->array[i].may_not_optimize = 1;
8802 regs->array[i].set_in_loop = 1;
8805 #ifdef AVOID_CCMODE_COPIES
8806 /* Don't try to move insns which set CC registers if we should not
8807 create CCmode register copies. */
8808 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
8809 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
8810 regs->array[i].may_not_optimize = 1;
8811 #endif
8813 /* Set regs->array[I].n_times_set for the new registers. */
8814 for (i = old_nregs; i < regs->num; i++)
8815 regs->array[i].n_times_set = regs->array[i].set_in_loop;
8817 free (last_set);
8820 /* Returns the number of real INSNs in the LOOP. */
8822 static int
8823 count_insns_in_loop (loop)
8824 const struct loop *loop;
8826 int count = 0;
8827 rtx insn;
8829 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8830 insn = NEXT_INSN (insn))
8831 if (INSN_P (insn))
8832 ++count;
8834 return count;
8837 /* Move MEMs into registers for the duration of the loop. */
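/* A sketch of the transformation on a hypothetical source loop:

       for (i = 0; i < n; i++)            tmp = *p;
         sum += *p;              ==>      for (i = 0; i < n; i++)
                                            sum += tmp;

   The load is hoisted in front of the loop; if the MEM were also
   written inside the loop, a store back from the shadow register
   would be emitted after the loop's exit label as well.  */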
8839 static void
8840 load_mems (loop)
8841 const struct loop *loop;
8843 struct loop_info *loop_info = LOOP_INFO (loop);
8844 struct loop_regs *regs = LOOP_REGS (loop);
8845 int maybe_never = 0;
8846 int i;
8847 rtx p, prev_ebb_head;
8848 rtx label = NULL_RTX;
8849 rtx end_label;
8850 /* Nonzero if the next instruction may never be executed. */
8851 int next_maybe_never = 0;
8852 unsigned int last_max_reg = max_reg_num ();
8854 if (loop_info->mems_idx == 0)
8855 return;
8857 /* We cannot use next_label here because it skips over normal insns. */
8858 end_label = next_nonnote_insn (loop->end);
8859 if (end_label && GET_CODE (end_label) != CODE_LABEL)
8860 end_label = NULL_RTX;
8862 /* Check to see if it's possible that some instructions in the loop are
8863 never executed. Also check if there is a goto out of the loop other
8864 than right after the end of the loop. */
8865 for (p = next_insn_in_loop (loop, loop->scan_start);
8866 p != NULL_RTX;
8867 p = next_insn_in_loop (loop, p))
8869 if (GET_CODE (p) == CODE_LABEL)
8870 maybe_never = 1;
8871 else if (GET_CODE (p) == JUMP_INSN
8872 /* If we enter the loop in the middle, and scan
8873 around to the beginning, don't set maybe_never
8874 for that. This must be an unconditional jump,
8875 otherwise the code at the top of the loop might
8876 never be executed. Unconditional jumps are
8877 followed by a barrier and then the loop end. */
8878 && ! (GET_CODE (p) == JUMP_INSN
8879 && JUMP_LABEL (p) == loop->top
8880 && NEXT_INSN (NEXT_INSN (p)) == loop->end
8881 && any_uncondjump_p (p)))
8883 /* If this is a jump outside of the loop but not right
8884 after the end of the loop, we would have to emit new fixup
8885 sequences for each such label. */
8886 if (/* If we can't tell where control might go when this
8887 JUMP_INSN is executed, we must be conservative. */
8888 !JUMP_LABEL (p)
8889 || (JUMP_LABEL (p) != end_label
8890 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
8891 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
8892 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
8893 return;
8895 if (!any_condjump_p (p))
8896 /* Something complicated. */
8897 maybe_never = 1;
8898 else
8899 /* If there are any more instructions in the loop, they
8900 might not be reached. */
8901 next_maybe_never = 1;
8903 else if (next_maybe_never)
8904 maybe_never = 1;
8907 /* Find start of the extended basic block that enters the loop. */
8908 for (p = loop->start;
8909 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
8910 p = PREV_INSN (p))
8912 prev_ebb_head = p;
8914 cselib_init ();
8916 /* Build table of mems that get set to constant values before the
8917 loop. */
8918 for (; p != loop->start; p = NEXT_INSN (p))
8919 cselib_process_insn (p);
8921 /* Actually move the MEMs. */
8922 for (i = 0; i < loop_info->mems_idx; ++i)
8924 regset_head load_copies;
8925 regset_head store_copies;
8926 int written = 0;
8927 rtx reg;
8928 rtx mem = loop_info->mems[i].mem;
8929 rtx mem_list_entry;
8931 if (MEM_VOLATILE_P (mem)
8932 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8933 /* There's no telling whether or not MEM is modified. */
8934 loop_info->mems[i].optimize = 0;
8936 /* Go through the MEMs written to in the loop to see if this
8937 one is aliased by one of them. */
8938 mem_list_entry = loop_info->store_mems;
8939 while (mem_list_entry)
8941 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
8942 written = 1;
8943 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
8944 mem, rtx_varies_p))
8946 /* MEM is indeed aliased by this store. */
8947 loop_info->mems[i].optimize = 0;
8948 break;
8950 mem_list_entry = XEXP (mem_list_entry, 1);
8953 if (flag_float_store && written
8954 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
8955 loop_info->mems[i].optimize = 0;
8957 /* If this MEM is written to, we must be sure that there
8958 are no reads from another MEM that aliases this one. */
8959 if (loop_info->mems[i].optimize && written)
8961 int j;
8963 for (j = 0; j < loop_info->mems_idx; ++j)
8965 if (j == i)
8966 continue;
8967 else if (true_dependence (mem,
8968 VOIDmode,
8969 loop_info->mems[j].mem,
8970 rtx_varies_p))
8972 /* It's not safe to hoist loop_info->mems[i] out of
8973 the loop because writes to it might not be
8974 seen by reads from loop_info->mems[j]. */
8975 loop_info->mems[i].optimize = 0;
8976 break;
8981 if (maybe_never && may_trap_p (mem))
8982 /* We can't access the MEM outside the loop; it might
8983 cause a trap that wouldn't have happened otherwise. */
8984 loop_info->mems[i].optimize = 0;
8986 if (!loop_info->mems[i].optimize)
8987 /* We thought we were going to lift this MEM out of the
8988 loop, but later discovered that we could not. */
8989 continue;
8991 INIT_REG_SET (&load_copies);
8992 INIT_REG_SET (&store_copies);
8994 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
8995 order to keep scan_loop from moving stores to this MEM
8996 out of the loop just because this REG is neither a
8997 user-variable nor used in the loop test. */
8998 reg = gen_reg_rtx (GET_MODE (mem));
8999 REG_USERVAR_P (reg) = 1;
9000 loop_info->mems[i].reg = reg;
9002 /* Now, replace all references to the MEM with the
9003 corresponding pseudos. */
9004 maybe_never = 0;
9005 for (p = next_insn_in_loop (loop, loop->scan_start);
9006 p != NULL_RTX;
9007 p = next_insn_in_loop (loop, p))
9009 if (INSN_P (p))
9011 rtx set;
9013 set = single_set (p);
9015 /* See if this copies the mem into a register that isn't
9016 modified afterwards. We'll try to do copy propagation
9017 a little further on. */
9018 if (set
9019 /* @@@ This test is _way_ too conservative. */
9020 && ! maybe_never
9021 && GET_CODE (SET_DEST (set)) == REG
9022 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9023 && REGNO (SET_DEST (set)) < last_max_reg
9024 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9025 && rtx_equal_p (SET_SRC (set), mem))
9026 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9028 /* See if this copies the mem from a register that isn't
9029 modified afterwards. We'll try to remove the
9030 redundant copy later on by doing a little register
9031 renaming and copy propagation. This will help
9032 to untangle things for the BIV detection code. */
9033 if (set
9034 && ! maybe_never
9035 && GET_CODE (SET_SRC (set)) == REG
9036 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9037 && REGNO (SET_SRC (set)) < last_max_reg
9038 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9039 && rtx_equal_p (SET_DEST (set), mem))
9040 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9042 /* Replace the memory reference with the shadow register. */
9043 replace_loop_mems (p, loop_info->mems[i].mem,
9044 loop_info->mems[i].reg);
9047 if (GET_CODE (p) == CODE_LABEL
9048 || GET_CODE (p) == JUMP_INSN)
9049 maybe_never = 1;
9052 if (! apply_change_group ())
9053 /* We couldn't replace all occurrences of the MEM. */
9054 loop_info->mems[i].optimize = 0;
9055 else
9057 /* Load the memory immediately before LOOP->START, which is
9058 the NOTE_LOOP_BEG. */
9059 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9060 rtx set;
9061 rtx best = mem;
9062 int j;
9063 struct elt_loc_list *const_equiv = 0;
9065 if (e)
9067 struct elt_loc_list *equiv;
9068 struct elt_loc_list *best_equiv = 0;
9069 for (equiv = e->locs; equiv; equiv = equiv->next)
9071 if (CONSTANT_P (equiv->loc))
9072 const_equiv = equiv;
9073 else if (GET_CODE (equiv->loc) == REG
9074 /* Extending hard register lifetimes causes crashes
9075 on SRC targets. Doing so on non-SRC targets is
9076 probably also not a good idea, since we most
9077 probably have a pseudo-register equivalence as
9078 well. */
9079 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9080 best_equiv = equiv;
9082 /* Use the constant equivalence if that is cheap enough. */
9083 if (! best_equiv)
9084 best_equiv = const_equiv;
9085 else if (const_equiv
9086 && (rtx_cost (const_equiv->loc, SET)
9087 <= rtx_cost (best_equiv->loc, SET)))
9089 best_equiv = const_equiv;
9090 const_equiv = 0;
9093 /* If best_equiv is nonzero, we know that MEM is set to a
9094 constant or register before the loop. We will use this
9095 knowledge to initialize the shadow register with that
9096 constant or reg rather than by loading from MEM. */
9097 if (best_equiv)
9098 best = copy_rtx (best_equiv->loc);
9101 set = gen_move_insn (reg, best);
9102 set = loop_insn_hoist (loop, set);
9103 if (REG_P (best))
9105 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9106 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9108 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9109 break;
9113 if (const_equiv)
9114 REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
9115 copy_rtx (const_equiv->loc),
9116 REG_NOTES (set));
9118 if (written)
9120 if (label == NULL_RTX)
9122 label = gen_label_rtx ();
9123 emit_label_after (label, loop->end);
9126 /* Store the memory immediately after END, which is
9127 the NOTE_LOOP_END. */
9128 set = gen_move_insn (copy_rtx (mem), reg);
9129 loop_insn_emit_after (loop, 0, label, set);
9132 if (loop_dump_stream)
9134 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9135 REGNO (reg), (written ? "r/w" : "r/o"));
9136 print_rtl (loop_dump_stream, mem);
9137 fputc ('\n', loop_dump_stream);
9140 /* Attempt a bit of copy propagation. This helps untangle the
9141 data flow, and enables {basic,general}_induction_var to find
9142 more bivs/givs. */
9143 EXECUTE_IF_SET_IN_REG_SET
9144 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9146 try_copy_prop (loop, reg, j);
9148 CLEAR_REG_SET (&load_copies);
9150 EXECUTE_IF_SET_IN_REG_SET
9151 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9153 try_swap_copy_prop (loop, reg, j);
9155 CLEAR_REG_SET (&store_copies);
9159 if (label != NULL_RTX && end_label != NULL_RTX)
9161 /* Now, we need to replace all references to the previous exit
9162 label with the new one. */
9163 rtx_pair rr;
9164 rr.r1 = end_label;
9165 rr.r2 = label;
9167 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9169 for_each_rtx (&p, replace_label, &rr);
9171 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9172 field. This is not handled by for_each_rtx because it doesn't
9173 handle unprinted ('0') fields. We need to update JUMP_LABEL
9174 because the immediately following unroll pass will use it.
9175 replace_label would not work anyway, because it only handles
9176 LABEL_REFs. */
9177 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9178 JUMP_LABEL (p) = label;
9182 cselib_finish ();
9185 /* For communication between note_reg_stored and its caller. */
9186 struct note_reg_stored_arg
9188 int set_seen;
9189 rtx reg;
9192 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9193 is equal to ARG. */
9194 static void
9195 note_reg_stored (x, setter, arg)
9196 rtx x, setter ATTRIBUTE_UNUSED;
9197 void *arg;
9199 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9200 if (t->reg == x)
9201 t->set_seen = 1;
9204 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9205 There must be exactly one insn that sets this pseudo; it will be
9206 deleted if all replacements succeed and we can prove that the register
9207 is not used after the loop. */
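/* E.g. (hypothetical) once load_mems has created a shadow register
   SHADOW for a MEM, an in-loop copy

       (set (reg T) (reg SHADOW))

   lets this function rewrite every later use of T as SHADOW and, if
   T is provably unused after the loop, delete the copy (or the whole
   libcall sequence that produced it).  */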
9209 static void
9210 try_copy_prop (loop, replacement, regno)
9211 const struct loop *loop;
9212 rtx replacement;
9213 unsigned int regno;
9215 /* This is the reg that we are copying from. */
9216 rtx reg_rtx = regno_reg_rtx[regno];
9217 rtx init_insn = 0;
9218 rtx insn;
9219 /* These help keep track of whether we replaced all uses of the reg. */
9220 int replaced_last = 0;
9221 int store_is_first = 0;
9223 for (insn = next_insn_in_loop (loop, loop->scan_start);
9224 insn != NULL_RTX;
9225 insn = next_insn_in_loop (loop, insn))
9227 rtx set;
9229 /* Only substitute within one extended basic block from the initializing
9230 insn. */
9231 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9232 break;
9234 if (! INSN_P (insn))
9235 continue;
9237 /* Is this the initializing insn? */
9238 set = single_set (insn);
9239 if (set
9240 && GET_CODE (SET_DEST (set)) == REG
9241 && REGNO (SET_DEST (set)) == regno)
9243 if (init_insn)
9244 abort ();
9246 init_insn = insn;
9247 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9248 store_is_first = 1;
9251 /* Only substitute after seeing the initializing insn. */
9252 if (init_insn && insn != init_insn)
9254 struct note_reg_stored_arg arg;
9256 replace_loop_regs (insn, reg_rtx, replacement);
9257 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9258 replaced_last = 1;
9260 /* Stop replacing when REPLACEMENT is modified. */
9261 arg.reg = replacement;
9262 arg.set_seen = 0;
9263 note_stores (PATTERN (insn), note_reg_stored, &arg);
9264 if (arg.set_seen)
9265 break;
9268 if (! init_insn)
9269 abort ();
9270 if (apply_change_group ())
9272 if (loop_dump_stream)
9273 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9274 if (store_is_first && replaced_last)
9276 rtx first;
9277 rtx retval_note;
9279 /* Assume we're just deleting INIT_INSN. */
9280 first = init_insn;
9281 /* Look for REG_RETVAL note. If we're deleting the end of
9282 the libcall sequence, the whole sequence can go. */
9283 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9284 /* If we found a REG_RETVAL note, find the first instruction
9285 in the sequence. */
9286 if (retval_note)
9287 first = XEXP (retval_note, 0);
9289 /* Delete the instructions. */
9290 loop_delete_insns (first, init_insn);
9292 if (loop_dump_stream)
9293 fprintf (loop_dump_stream, ".\n");
9297 /* Replace all the instructions from FIRST up to and including LAST
9298 with NOTE_INSN_DELETED notes. */
9300 static void
9301 loop_delete_insns (first, last)
9302 rtx first;
9303 rtx last;
9305 while (1)
9307 PUT_CODE (first, NOTE);
9308 NOTE_LINE_NUMBER (first) = NOTE_INSN_DELETED;
9309 if (loop_dump_stream)
9310 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
9311 INSN_UID (first));
9313 /* If this was the LAST instruction we're supposed to delete,
9314 we're done. */
9315 if (first == last)
9316 break;
9318 first = NEXT_INSN (first);
9322 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
9323 loop LOOP if the order of the sets of these registers can be
9324 swapped. There must be exactly one insn within the loop that sets
9325 this pseudo followed immediately by a move insn that sets
9326 REPLACEMENT with REGNO. */
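/* A sketch of the kind of source that produces this pattern (hypothetical
   example): a global used as the induction variable, shadowed by a pseudo,

	static int i;
	void f (int n, int *a) { for (i = 0; i < n; i++) a[i] = 0; }

   can leave the loop body with (set (reg i) (expr)) immediately followed
   by the shadowing copy (set (reg shadow) (reg i)).  */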
9327 static void
9328 try_swap_copy_prop (loop, replacement, regno)
9329 const struct loop *loop;
9330 rtx replacement;
9331 unsigned int regno;
9333 rtx insn;
9334 rtx set = NULL_RTX;
9335 unsigned int new_regno;
9337 new_regno = REGNO (replacement);
9339 for (insn = next_insn_in_loop (loop, loop->scan_start);
9340 insn != NULL_RTX;
9341 insn = next_insn_in_loop (loop, insn))
9343 /* Search for the insn that copies REGNO to NEW_REGNO. */
9344 if (INSN_P (insn)
9345 && (set = single_set (insn))
9346 && GET_CODE (SET_DEST (set)) == REG
9347 && REGNO (SET_DEST (set)) == new_regno
9348 && GET_CODE (SET_SRC (set)) == REG
9349 && REGNO (SET_SRC (set)) == regno)
9350 break;
9353 if (insn != NULL_RTX)
9355 rtx prev_insn;
9356 rtx prev_set;
9358 /* Some DEF-USE info would come in handy here to make this
9359 function more general. For now, just check the previous insn
9360 which is the most likely candidate for setting REGNO. */
9362 prev_insn = PREV_INSN (insn);
9364 if (INSN_P (prev_insn)
9365 && (prev_set = single_set (prev_insn))
9366 && GET_CODE (SET_DEST (prev_set)) == REG
9367 && REGNO (SET_DEST (prev_set)) == regno)
9369 /* We have:
9370 (set (reg regno) (expr))
9371 (set (reg new_regno) (reg regno))
9373 so try converting this to:
9374 (set (reg new_regno) (expr))
9375 (set (reg regno) (reg new_regno))
9377 The former construct is often generated when a global
9378 variable used for an induction variable is shadowed by a
9379 register (NEW_REGNO). The latter construct improves the
9380 chances of GIV replacement and BIV elimination. */
9382 validate_change (prev_insn, &SET_DEST (prev_set),
9383 replacement, 1);
9384 validate_change (insn, &SET_DEST (set),
9385 SET_SRC (set), 1);
9386 validate_change (insn, &SET_SRC (set),
9387 replacement, 1);
9389 if (apply_change_group ())
9391 if (loop_dump_stream)
9392 fprintf (loop_dump_stream,
9393 " Swapped set of reg %d at %d with reg %d at %d.\n",
9394 regno, INSN_UID (insn),
9395 new_regno, INSN_UID (prev_insn));
9397 /* Update first use of REGNO. */
9398 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
9399 REGNO_FIRST_UID (regno) = INSN_UID (insn);
9401 /* Now perform copy propagation to hopefully
9402 remove all uses of REGNO within the loop. */
9403 try_copy_prop (loop, replacement, regno);
9409 /* Replace MEM with its associated pseudo register. This function is
9410 called from load_mems via for_each_rtx. DATA is actually a pointer
9411 to a structure describing the instruction currently being scanned
9412 and the MEM we are currently replacing. */
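/* (For reference: a for_each_rtx callback returns 0 to continue the
   traversal, -1 to skip the sub-expressions of the current rtx but
   continue elsewhere, and any other nonzero value to stop the traversal
   and have that value returned by for_each_rtx itself.)  */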
9414 static int
9415 replace_loop_mem (mem, data)
9416 rtx *mem;
9417 void *data;
9419 loop_replace_args *args = (loop_replace_args *) data;
9420 rtx m = *mem;
9422 if (m == NULL_RTX)
9423 return 0;
9425 switch (GET_CODE (m))
9427 case MEM:
9428 break;
9430 case CONST_DOUBLE:
9431 /* We're not interested in the MEM associated with a
9432 CONST_DOUBLE, so there's no need to traverse into one. */
9433 return -1;
9435 default:
9436 /* This is not a MEM. */
9437 return 0;
9440 if (!rtx_equal_p (args->match, m))
9441 /* This is not the MEM we are currently replacing. */
9442 return 0;
9444 /* Actually replace the MEM. */
9445 validate_change (args->insn, mem, args->replacement, 1);
9447 return 0;
9450 static void
9451 replace_loop_mems (insn, mem, reg)
9452 rtx insn;
9453 rtx mem;
9454 rtx reg;
9456 loop_replace_args args;
9458 args.insn = insn;
9459 args.match = mem;
9460 args.replacement = reg;
9462 for_each_rtx (&insn, replace_loop_mem, &args);
9465 /* Replace one register with another. Called through for_each_rtx; PX points
9466 to the rtx being scanned. DATA is actually a pointer to
9467 a structure of arguments. */
9469 static int
9470 replace_loop_reg (px, data)
9471 rtx *px;
9472 void *data;
9474 rtx x = *px;
9475 loop_replace_args *args = (loop_replace_args *) data;
9477 if (x == NULL_RTX)
9478 return 0;
9480 if (x == args->match)
9481 validate_change (args->insn, px, args->replacement, 1);
9483 return 0;
9486 static void
9487 replace_loop_regs (insn, reg, replacement)
9488 rtx insn;
9489 rtx reg;
9490 rtx replacement;
9492 loop_replace_args args;
9494 args.insn = insn;
9495 args.match = reg;
9496 args.replacement = replacement;
9498 for_each_rtx (&insn, replace_loop_reg, &args);
9501 /* Replace occurrences of the old exit label for the loop with the new
9502 one. DATA is an rtx_pair containing the old and new labels,
9503 respectively. */
9505 static int
9506 replace_label (x, data)
9507 rtx *x;
9508 void *data;
9510 rtx l = *x;
9511 rtx old_label = ((rtx_pair *) data)->r1;
9512 rtx new_label = ((rtx_pair *) data)->r2;
9514 if (l == NULL_RTX)
9515 return 0;
9517 if (GET_CODE (l) != LABEL_REF)
9518 return 0;
9520 if (XEXP (l, 0) != old_label)
9521 return 0;
9523 XEXP (l, 0) = new_label;
9524 ++LABEL_NUSES (new_label);
9525 --LABEL_NUSES (old_label);
9527 return 0;
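/* Usage sketch (hypothetical labels): pack the labels into an rtx_pair
   and walk an insn with for_each_rtx,

	rtx_pair pair;
	pair.r1 = old_exit_label;
	pair.r2 = new_exit_label;
	for_each_rtx (&insn, replace_label, &pair);

   Only LABEL_REFs are rewritten; a bare CODE_LABEL use such as
   JUMP_LABEL must be updated by hand, as noted above.  */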
9530 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
9531 (ignored in the interim). */
9533 static rtx
9534 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
9535 const struct loop *loop ATTRIBUTE_UNUSED;
9536 basic_block where_bb ATTRIBUTE_UNUSED;
9537 rtx where_insn;
9538 rtx pattern;
9540 return emit_insn_after (pattern, where_insn);
9544 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
9545 in basic block WHERE_BB (ignored in the interim) within the loop;
9546 otherwise, hoist PATTERN into the loop pre-header. */
9548 rtx
9549 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
9550 const struct loop *loop;
9551 basic_block where_bb ATTRIBUTE_UNUSED;
9552 rtx where_insn;
9553 rtx pattern;
9555 if (! where_insn)
9556 return loop_insn_hoist (loop, pattern);
9557 return emit_insn_before (pattern, where_insn);
9561 /* Emit call insn for PATTERN before WHERE_INSN in basic block
9562 WHERE_BB (ignored in the interim) within the loop. */
9564 static rtx
9565 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
9566 const struct loop *loop ATTRIBUTE_UNUSED;
9567 basic_block where_bb ATTRIBUTE_UNUSED;
9568 rtx where_insn;
9569 rtx pattern;
9571 return emit_call_insn_before (pattern, where_insn);
9575 /* Hoist insn for PATTERN into the loop pre-header. */
9577 rtx
9578 loop_insn_hoist (loop, pattern)
9579 const struct loop *loop;
9580 rtx pattern;
9582 return loop_insn_emit_before (loop, 0, loop->start, pattern);
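/* Typical use (illustrative only): materialize a loop-invariant value in
   the pre-header so the loop body can refer to a pseudo instead,

	rtx reg = gen_reg_rtx (SImode);
	loop_insn_hoist (loop, gen_move_insn (reg, GEN_INT (42)));  */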
9586 /* Hoist call insn for PATTERN into the loop pre-header. */
9588 static rtx
9589 loop_call_insn_hoist (loop, pattern)
9590 const struct loop *loop;
9591 rtx pattern;
9593 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
9597 /* Sink insn for PATTERN after the loop end. */
9599 rtx
9600 loop_insn_sink (loop, pattern)
9601 const struct loop *loop;
9602 rtx pattern;
9604 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
9608 /* If the loop has multiple exits, emit insn for PATTERN before the
9609 loop to ensure that it will always be executed no matter how the
9610 loop exits. Otherwise, emit the insn for PATTERN after the loop,
9611 since this is slightly more efficient. */
9613 static rtx
9614 loop_insn_sink_or_swim (loop, pattern)
9615 const struct loop *loop;
9616 rtx pattern;
9618 if (loop->exit_count)
9619 return loop_insn_hoist (loop, pattern);
9620 else
9621 return loop_insn_sink (loop, pattern);
9624 static void
9625 loop_ivs_dump (loop, file, verbose)
9626 const struct loop *loop;
9627 FILE *file;
9628 int verbose;
9630 struct iv_class *bl;
9631 int iv_num = 0;
9633 if (! loop || ! file)
9634 return;
9636 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9637 iv_num++;
9639 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
9641 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9643 loop_iv_class_dump (bl, file, verbose);
9644 fputc ('\n', file);
9649 static void
9650 loop_iv_class_dump (bl, file, verbose)
9651 const struct iv_class *bl;
9652 FILE *file;
9653 int verbose ATTRIBUTE_UNUSED;
9655 struct induction *v;
9656 rtx incr;
9657 int i;
9659 if (! bl || ! file)
9660 return;
9662 fprintf (file, "IV class for reg %d, benefit %d\n",
9663 bl->regno, bl->total_benefit);
9665 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
9666 if (bl->initial_value)
9668 fprintf (file, ", init val: ");
9669 print_simple_rtl (file, bl->initial_value);
9671 if (bl->initial_test)
9673 fprintf (file, ", init test: ");
9674 print_simple_rtl (file, bl->initial_test);
9676 fputc ('\n', file);
9678 if (bl->final_value)
9680 fprintf (file, " Final val: ");
9681 print_simple_rtl (file, bl->final_value);
9682 fputc ('\n', file);
9685 if ((incr = biv_total_increment (bl)))
9687 fprintf (file, " Total increment: ");
9688 print_simple_rtl (file, incr);
9689 fputc ('\n', file);
9692 /* List the increments. */
9693 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
9695 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
9696 print_simple_rtl (file, v->add_val);
9697 fputc ('\n', file);
9700 /* List the givs. */
9701 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
9703 fprintf (file, " Giv%d: insn %d, benefit %d, ",
9704 i, INSN_UID (v->insn), v->benefit);
9705 if (v->giv_type == DEST_ADDR)
9706 print_simple_rtl (file, v->mem);
9707 else
9708 print_simple_rtl (file, single_set (v->insn));
9709 fputc ('\n', file);
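/* With hypothetical insn and register numbers, the dump produced above
   looks roughly like:

	IV class for reg 64, benefit 12
	 Init insn 10, init val: (const_int 0)
	 Final val: (const_int 400)
	 Total increment: (const_int 4)
	 Inc0: insn 30, incr: (const_int 4)
	 Giv0: insn 32, benefit 6, (set (reg 70) (plus (reg 64) (reg 65)))
*/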
9714 static void
9715 loop_biv_dump (v, file, verbose)
9716 const struct induction *v;
9717 FILE *file;
9718 int verbose;
9720 if (! v || ! file)
9721 return;
9723 fprintf (file,
9724 "Biv %d: insn %d",
9725 REGNO (v->dest_reg), INSN_UID (v->insn));
9726 fprintf (file, " const ");
9727 print_simple_rtl (file, v->add_val);
9729 if (verbose && v->final_value)
9731 fputc ('\n', file);
9732 fprintf (file, " final ");
9733 print_simple_rtl (file, v->final_value);
9736 fputc ('\n', file);
9740 static void
9741 loop_giv_dump (v, file, verbose)
9742 const struct induction *v;
9743 FILE *file;
9744 int verbose;
9746 if (! v || ! file)
9747 return;
9749 if (v->giv_type == DEST_REG)
9750 fprintf (file, "Giv %d: insn %d",
9751 REGNO (v->dest_reg), INSN_UID (v->insn));
9752 else
9753 fprintf (file, "Dest address: insn %d",
9754 INSN_UID (v->insn));
9756 fprintf (file, " src reg %d benefit %d",
9757 REGNO (v->src_reg), v->benefit);
9758 fprintf (file, " lifetime %d",
9759 v->lifetime);
9761 if (v->replaceable)
9762 fprintf (file, " replaceable");
9764 if (v->no_const_addval)
9765 fprintf (file, " ncav");
9767 if (v->ext_dependant)
9769 switch (GET_CODE (v->ext_dependant))
9771 case SIGN_EXTEND:
9772 fprintf (file, " ext se");
9773 break;
9774 case ZERO_EXTEND:
9775 fprintf (file, " ext ze");
9776 break;
9777 case TRUNCATE:
9778 fprintf (file, " ext tr");
9779 break;
9780 default:
9781 abort ();
9785 fputc ('\n', file);
9786 fprintf (file, " mult ");
9787 print_simple_rtl (file, v->mult_val);
9789 fputc ('\n', file);
9790 fprintf (file, " add ");
9791 print_simple_rtl (file, v->add_val);
9793 if (verbose && v->final_value)
9795 fputc ('\n', file);
9796 fprintf (file, " final ");
9797 print_simple_rtl (file, v->final_value);
9800 fputc ('\n', file);
9804 void
9805 debug_ivs (loop)
9806 const struct loop *loop;
9808 loop_ivs_dump (loop, stderr, 1);
9812 void
9813 debug_iv_class (bl)
9814 const struct iv_class *bl;
9816 loop_iv_class_dump (bl, stderr, 1);
9820 void
9821 debug_biv (v)
9822 const struct induction *v;
9824 loop_biv_dump (v, stderr, 1);
9828 void
9829 debug_giv (v)
9830 const struct induction *v;
9832 loop_giv_dump (v, stderr, 1);
9836 #define LOOP_BLOCK_NUM_1(INSN) \
9837 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)
9839 /* The notes do not have an assigned block, so look at the next insn. */
9840 #define LOOP_BLOCK_NUM(INSN) \
9841 ((INSN) ? (GET_CODE (INSN) == NOTE \
9842 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
9843 : LOOP_BLOCK_NUM_1 (INSN)) \
9844 : -1)
9846 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
9848 static void
9849 loop_dump_aux (loop, file, verbose)
9850 const struct loop *loop;
9851 FILE *file;
9852 int verbose ATTRIBUTE_UNUSED;
9854 rtx label;
9856 if (! loop || ! file)
9857 return;
9859 /* Print diagnostics to compare our concept of a loop with
9860 what the loop notes say. */
9861 if (! PREV_INSN (loop->first->head)
9862 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
9863 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
9864 != NOTE_INSN_LOOP_BEG)
9865 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
9866 INSN_UID (PREV_INSN (loop->first->head)));
9867 if (! NEXT_INSN (loop->last->end)
9868 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
9869 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
9870 != NOTE_INSN_LOOP_END)
9871 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
9872 INSN_UID (NEXT_INSN (loop->last->end)));
9874 if (loop->start)
9876 fprintf (file,
9877 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
9878 LOOP_BLOCK_NUM (loop->start),
9879 LOOP_INSN_UID (loop->start),
9880 LOOP_BLOCK_NUM (loop->cont),
9881 LOOP_INSN_UID (loop->cont),
9882 LOOP_BLOCK_NUM (loop->cont),
9883 LOOP_INSN_UID (loop->cont),
9884 LOOP_BLOCK_NUM (loop->vtop),
9885 LOOP_INSN_UID (loop->vtop),
9886 LOOP_BLOCK_NUM (loop->end),
9887 LOOP_INSN_UID (loop->end));
9888 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
9889 LOOP_BLOCK_NUM (loop->top),
9890 LOOP_INSN_UID (loop->top),
9891 LOOP_BLOCK_NUM (loop->scan_start),
9892 LOOP_INSN_UID (loop->scan_start));
9893 fprintf (file, ";; exit_count %d", loop->exit_count);
9894 if (loop->exit_count)
9896 fputs (", labels:", file);
9897 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
9899 fprintf (file, " %d ",
9900 LOOP_INSN_UID (XEXP (label, 0)));
9903 fputs ("\n", file);
9905 /* This can happen when a marked loop appears as two nested loops,
9906 say from while (a || b) {}. The inner loop won't match
9907 the loop markers but the outer one will. */
9908 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
9909 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
9913 /* Call this function from the debugger to dump LOOP. */
9915 void
9916 debug_loop (loop)
9917 const struct loop *loop;
9919 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
9922 /* Call this function from the debugger to dump LOOPS. */
9924 void
9925 debug_loops (loops)
9926 const struct loops *loops;
9928 flow_loops_dump (loops, stderr, loop_dump_aux, 1);