/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
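/* As an illustrative sketch (not itself part of the pass), the
   zero-extension case above corresponds to in-loop RTL of the shape

	(set (reg:SI r) (const_int 0))
	(set (strict_low_part (subreg:HI (reg:SI r) 0)) ...)

   where the clear of the full register can be hoisted ahead of the
   loop and only the narrow low-part store stays inside.  Registers
   handled this way are flagged with the `partial' bit in struct
   movable below.  */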
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
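/* For example, insns with uids 40, 12 and 57 appearing in that order
   in the insn stream get luids 1, 2 and 3: uids reflect creation
   order, while luids always follow stream order, so
   INSN_LUID (a) < INSN_LUID (b) exactly when A precedes B among the
   insns numbered here.  */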
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static varray_type set_in_loop;

/* Original value of set_in_loop; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static varray_type n_times_set;
/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static varray_type may_not_optimize;
/* Contains the insn in which a register was used if it was used
   exactly once; contains const0_rtx if it was used more than once.  */

static varray_type reg_single_usage;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* List of MEMs that are stored in this loop.  */

static rtx loop_store_mems;

/* The insn where the first of these was found.  */
static rtx first_loop_store_insn;

typedef struct loop_mem_info {
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMs.  */

static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current
   loop.  This happens if the loop contains a call (in which case
   `loop_info->has_call' will also be set) or if we store into more
   than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* The above doesn't count any readonly memory locations that are stored.
   This does.  */

static int unknown_constant_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */
struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

static struct movable *the_movables;

FILE *loop_dump_stream;
/* Forward declarations.  */

static void verify_dominator PARAMS ((struct loop *));
static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
					    rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((rtx, rtx, varray_type, rtx *));

static void count_loop_regs_set PARAMS ((const struct loop*,
					 varray_type, varray_type,
					 int *, int));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct movable *));
static void force_movables PARAMS ((struct movable *));
static void combine_movables PARAMS ((struct movable *, int));
static int regs_match_p PARAMS ((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct movable *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct movable *,
				   int, int, int));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void strength_reduce PARAMS ((struct loop *, int, int));
static void find_single_use_in_loop PARAMS ((rtx, rtx, varray_type));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct induction *, rtx, rtx, rtx, rtx, rtx *,
				int, int));
static void check_final_value PARAMS ((const struct loop *,
				       struct induction *));
static void record_giv PARAMS ((const struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx, int, enum g_types,
				int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
					enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
					  rtx *, rtx *, int, int *,
					  enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
				    rtx, rtx, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static void combine_givs PARAMS ((struct iv_class *));
struct recombine_givs_stats;
static int find_life_end PARAMS ((rtx, struct recombine_givs_stats *,
				  rtx, rtx));
static void recombine_givs PARAMS ((const struct loop *, struct iv_class *,
				    int));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
					int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
					  struct iv_class *, int, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void load_mems_and_recount_loop_regs_set PARAMS ((const struct loop*,
							 int *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
					unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS ((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS ((struct loop *, rtx, int, int));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
void debug_loop PARAMS ((const struct loop *));

typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))
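/* For instance, scan_loop below checks
   INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end)
   to verify that a candidate loop-entry jump targets a label inside
   the loop; the uid test also rejects insns created after the luid
   tables were sized, since those have no valid luid.  */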
/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
						     struct induction *, rtx));

/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;
#if 0
static int shift_cost;
static int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  reg_address_cost = address_cost (reg, SImode);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
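/* For example, loop_optimize below calls
   compute_luids (f, NULL_RTX, 0) to number every insn in the function
   starting at luid 1; the luids are then used to see whether a jump
   comes from outside a given loop.  */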
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  register rtx insn;
  register int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  moved_once = (char *) xcalloc (max_reg_before_loop, sizeof (char));

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  /* If there were lexical blocks inside the loop, they have been
     replicated.  We will now have more than one NOTE_INSN_BLOCK_BEG
     and NOTE_INSN_BLOCK_END for each such block.  We must duplicate
     the BLOCKs as well.  */
  if (write_symbols != NO_DEBUG)
    reorder_blocks ();

  end_alias_analysis ();

  /* Clean up.  */
  free (moved_once);
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
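/* As an illustrative sketch, a loop entered near the bottom is laid
   out roughly as

	NOTE_INSN_LOOP_BEG
	jump TEST		<- loop_entry_jump
   TOP:	body ...
   TEST:	exit test; conditional jump back to TOP
	NOTE_INSN_LOOP_END

   Scanning starts at LOOP->SCAN_START (here the TEST label); when the
   walk above reaches LOOP->END it wraps around to LOOP->TOP, and it
   stops once it comes back to LOOP->SCAN_START.  */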
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  register int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  /* Additional information about the current loop being processed
     that is used to compute the number of loop iterations for loop
     unrolling and doloop optimization.  */
  struct loop_info *loop_info = LOOP_INFO (loop);
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;

  loop->top = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
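  /* Illustrative numbers: with 30 non-fixed registers, a loop with no
     calls gets threshold = 2 * (1 + 30) = 62, while one containing a
     call gets only 1 * (1 + 30) = 31, since a call both reduces the
     payoff of saving an insn and leaves fewer registers free.  */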
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (any_uncondjump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
	{
	  loop->top = next_label (loop->scan_start);
	  loop->scan_start = JUMP_LABEL (p);
	}
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  Set VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_and_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
  VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop, may_not_optimize, reg_single_usage,
		       &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (set_in_loop, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

  bcopy ((char *) &set_in_loop->data,
	 (char *) &n_times_set->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set set_in_loop negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }
	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  else if (/* The register is used in basic blocks other
		      than the one where it is set (meaning that
		      something after this point in the loop might
		      depend on its value before the set).  */
		   ! reg_in_basic_block_p (p, SET_DEST (set))
		   /* And the set is not guaranteed to be executed once
		      the loop starts, or the value before the set is
		      needed before the set occurs...

		      ??? Note we have quadratic behaviour here, mitigated
		      by the fact that the previous test will often fail for
		      large loops.  Rather than re-scanning the entire loop
		      each time for register usage, we should build tables
		      of the register usage and use them here instead.  */
		   && (maybe_never
		       || loop_reg_used_before_p (loop, set, p)))
	    /* It is unsafe to move the set.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit test.
	       That behavior is incorrect and was removed.  */
	    ;
	  else if ((tem = loop_invariant_p (loop, src))
		   && (dependencies == 0
		       || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
		   && (VARRAY_INT (set_in_loop,
				   REGNO (SET_DEST (set))) == 1
		       || (tem1
			   = consec_sets_invariant_p
			   (loop, SET_DEST (set),
			    VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
			    p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (loop_info->has_call
		  && VARRAY_RTX (reg_single_usage, regno) != 0
		  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
		  && VARRAY_INT (set_in_loop, regno) == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   VARRAY_RTX
					   (reg_single_usage, regno))
		  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   VARRAY_RTX
					   (reg_single_usage, regno)))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
		    = replace_rtx (REG_NOTES (VARRAY_RTX
					      (reg_single_usage, regno)),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  VARRAY_INT (set_in_loop, regno) = 0;
		  continue;
		}

	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = VARRAY_INT (set_in_loop,
				      REGNO (SET_DEST (set))) - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either loop_invariant_p
		 or consec_sets_invariant_p returned 2
		 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[REGNO_LAST_UID (regno)]
			   > INSN_LUID (loop_end)
			   || uid_luid[REGNO_FIRST_UID (regno)]
			      < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
			     - uid_luid[REGNO_FIRST_UID (regno)]);
	      m->savings = VARRAY_INT (n_times_set, regno);
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;
	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (VARRAY_INT (set_in_loop, regno) == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[REGNO_LAST_UID (regno)]
				   > INSN_LUID (loop_end))
			       || (uid_luid[REGNO_FIRST_UID (regno)]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[REGNO_FIRST_UID (regno)])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
				 - uid_luid[REGNO_FIRST_UID (regno)]);
		  m->savings = 1;
		  VARRAY_INT (set_in_loop, regno) = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier and then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in set_in_loop for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (loop, movables, threshold, insn_count, nregs);

  /* Now candidates that still are negative are those not moved.
     Change set_in_loop to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (set_in_loop, i) < 0)
      VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems_and_recount_loop_regs_set (loop, &insn_count);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      the_movables = movables;
      strength_reduce (loop, insn_count, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_insn (update_end);
    }

  VARRAY_FREE (reg_single_usage);
  VARRAY_FREE (set_in_loop);
  VARRAY_FREE (n_times_set);
  VARRAY_FREE (may_not_optimize);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop; this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
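/* For example, a libcall block consisting of three argument-setup
   insns followed by the call itself gets benefit 3 + 10 = 13 under the
   counting above.  */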
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) xmalloc (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1
	&& !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && VARRAY_INT (n_times_set, m1->regno) == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[REGNO_FIRST_UID (m->regno)];
	    int last = uid_luid[REGNO_LAST_UID (m->regno)];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
		       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2
	   && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
				      XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
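/* As an illustrative example, if a movable M with move_insn set loads
   (reg 101) from (const_int 10), then set_in_loop[101] is -2 and the
   code above treats (reg 101) and (const_int 10) as equal, letting
   combine_movables match insns that use either form of the value.  */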
1671 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1672 insns in INSNS which use the reference. */
1674 static void
1675 add_label_notes (x, insns)
1676 rtx x;
1677 rtx insns;
1679 enum rtx_code code = GET_CODE (x);
1680 int i, j;
1681 const char *fmt;
1682 rtx insn;
1684 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1686 /* This code used to ignore labels that referred to dispatch tables to
1687 avoid flow generating (slightly) worse code.
1689 We no longer ignore such label references (see LABEL_REF handling in
1690 mark_jump_label for additional information). */
1691 for (insn = insns; insn; insn = NEXT_INSN (insn))
1692 if (reg_mentioned_p (XEXP (x, 0), insn))
1693 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1694 REG_NOTES (insn));
1697 fmt = GET_RTX_FORMAT (code);
1698 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1700 if (fmt[i] == 'e')
1701 add_label_notes (XEXP (x, i), insns);
1702 else if (fmt[i] == 'E')
1703 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1704 add_label_notes (XVECEXP (x, i, j), insns);
1708 /* Scan MOVABLES, and move the insns that deserve to be moved.
1709 If two matching movables are combined, replace one reg with the
1710 other throughout. */
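/* Rough sketch of the transformation (register numbers are made up):

       before:   L: (set (reg 100) (const_int 42))   ; inside the loop
                    ...
                    (jump L)

       after:       (set (reg 100) (const_int 42))   ; emitted before loop_start
                 L: ...
                    (jump L)

   Matching movables that load the same value are combined by mapping one
   destination register onto the other via REG_MAP below.  */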
1712 static void
1713 move_movables (loop, movables, threshold, insn_count, nregs)
1714 struct loop *loop;
1715 struct movable *movables;
1716 int threshold;
1717 int insn_count;
1718 int nregs;
1720 rtx new_start = 0;
1721 register struct movable *m;
1722 register rtx p;
1723 rtx loop_start = loop->start;
1724 rtx loop_end = loop->end;
1725 /* Map of pseudo-register replacements to handle combining
1726 when we move several insns that load the same value
1727 into different pseudo-registers. */
1728 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1729 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1731 num_movables = 0;
1733 for (m = movables; m; m = m->next)
1735 /* Describe this movable insn. */
1737 if (loop_dump_stream)
1739 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1740 INSN_UID (m->insn), m->regno, m->lifetime);
1741 if (m->consec > 0)
1742 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1743 if (m->cond)
1744 fprintf (loop_dump_stream, "cond ");
1745 if (m->force)
1746 fprintf (loop_dump_stream, "force ");
1747 if (m->global)
1748 fprintf (loop_dump_stream, "global ");
1749 if (m->done)
1750 fprintf (loop_dump_stream, "done ");
1751 if (m->move_insn)
1752 fprintf (loop_dump_stream, "move-insn ");
1753 if (m->match)
1754 fprintf (loop_dump_stream, "matches %d ",
1755 INSN_UID (m->match->insn));
1756 if (m->forces)
1757 fprintf (loop_dump_stream, "forces %d ",
1758 INSN_UID (m->forces->insn));
1761 /* Count movables. Value used in heuristics in strength_reduce. */
1762 num_movables++;
1764 /* Ignore the insn if it's already done (it matched something else).
1765 Otherwise, see if it is now safe to move. */
1767 if (!m->done
1768 && (! m->cond
1769 || (1 == loop_invariant_p (loop, m->set_src)
1770 && (m->dependencies == 0
1771 || 1 == loop_invariant_p (loop, m->dependencies))
1772 && (m->consec == 0
1773 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1774 m->consec + 1,
1775 m->insn))))
1776 && (! m->forces || m->forces->done))
1778 register int regno;
1779 register rtx p;
1780 int savings = m->savings;
1782 /* We have an insn that is safe to move.
1783 Compute its desirability. */
1785 p = m->insn;
1786 regno = m->regno;
1788 if (loop_dump_stream)
1789 fprintf (loop_dump_stream, "savings %d ", savings);
1791 if (moved_once[regno] && loop_dump_stream)
1792 fprintf (loop_dump_stream, "halved since already moved ");
1794 /* An insn MUST be moved if we already moved something else
1795 which is safe only if this one is moved too: that is,
1796 if already_moved[REGNO] is nonzero. */
1798 /* An insn is desirable to move if the new lifetime of the
1799 register is no more than THRESHOLD times the old lifetime.
1800 If it's not desirable, it means the loop is so big
1801 that moving won't speed things up much,
1802 and it is liable to make register usage worse. */
1804 /* It is also desirable to move if it can be moved at no
1805 extra cost because something else was already moved. */
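/* Illustrative numbers (not from this file's callers): with threshold 3,
   savings 2 and a register lifetime of 20 insns, 3 * 2 * 20 = 120 >= 100,
   so the insn is moved out of a 100-insn loop; if the register had already
   been moved out of another loop, the bar doubles to 200 and the insn
   stays put.  Note also that THRESHOLD shrinks by 3 for each insn moved,
   so later candidates face a stricter test.  */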
1807 if (already_moved[regno]
1808 || flag_move_all_movables
1809 || (threshold * savings * m->lifetime) >=
1810 (moved_once[regno] ? insn_count * 2 : insn_count)
1811 || (m->forces && m->forces->done
1812 && VARRAY_INT (n_times_set, m->forces->regno) == 1))
1814 int count;
1815 register struct movable *m1;
1816 rtx first = NULL_RTX;
1818 /* Now move the insns that set the reg. */
1820 if (m->partial && m->match)
1822 rtx newpat, i1;
1823 rtx r1, r2;
1824 /* Find the end of this chain of matching regs.
1825 Thus, we load each reg in the chain from that one reg.
1826 And that reg is loaded with 0 directly,
1827 since it has ->match == 0. */
1828 for (m1 = m; m1->match; m1 = m1->match);
1829 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1830 SET_DEST (PATTERN (m1->insn)));
1831 i1 = emit_insn_before (newpat, loop_start);
1833 /* Mark the moved, invariant reg as being allowed to
1834 share a hard reg with the other matching invariant. */
1835 REG_NOTES (i1) = REG_NOTES (m->insn);
1836 r1 = SET_DEST (PATTERN (m->insn));
1837 r2 = SET_DEST (PATTERN (m1->insn));
1838 regs_may_share
1839 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1840 gen_rtx_EXPR_LIST (VOIDmode, r2,
1841 regs_may_share));
1842 delete_insn (m->insn);
1844 if (new_start == 0)
1845 new_start = i1;
1847 if (loop_dump_stream)
1848 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1850 /* If we are to re-generate the item being moved with a
1851 new move insn, first delete what we have and then emit
1852 the move insn before the loop. */
1853 else if (m->move_insn)
1855 rtx i1, temp;
1857 for (count = m->consec; count >= 0; count--)
1859 /* If this is the first insn of a library call sequence,
1860 skip to the end. */
1861 if (GET_CODE (p) != NOTE
1862 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1863 p = XEXP (temp, 0);
1865 /* If this is the last insn of a libcall sequence, then
1866 delete every insn in the sequence except the last.
1867 The last insn is handled in the normal manner. */
1868 if (GET_CODE (p) != NOTE
1869 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1871 temp = XEXP (temp, 0);
1872 while (temp != p)
1873 temp = delete_insn (temp);
1876 temp = p;
1877 p = delete_insn (p);
1879 /* simplify_giv_expr expects that it can walk the insns
1880 at m->insn forwards and see this old sequence we are
1881 tossing here. delete_insn does preserve the next
1882 pointers, but when we skip over a NOTE we must fix
1883 it up. Otherwise that code walks into the non-deleted
1884 insn stream. */
1885 while (p && GET_CODE (p) == NOTE)
1886 p = NEXT_INSN (temp) = NEXT_INSN (p);
1889 start_sequence ();
1890 emit_move_insn (m->set_dest, m->set_src);
1891 temp = get_insns ();
1892 end_sequence ();
1894 add_label_notes (m->set_src, temp);
1896 i1 = emit_insns_before (temp, loop_start);
1897 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1898 REG_NOTES (i1)
1899 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1900 m->set_src, REG_NOTES (i1));
1902 if (loop_dump_stream)
1903 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1905 /* The more regs we move, the less we like moving them. */
1906 threshold -= 3;
1908 else
1910 for (count = m->consec; count >= 0; count--)
1912 rtx i1, temp;
1914 /* If first insn of libcall sequence, skip to end. */
1915 /* Do this at start of loop, since p is guaranteed to
1916 be an insn here. */
1917 if (GET_CODE (p) != NOTE
1918 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1919 p = XEXP (temp, 0);
1921 /* If last insn of libcall sequence, move all
1922 insns except the last before the loop. The last
1923 insn is handled in the normal manner. */
1924 if (GET_CODE (p) != NOTE
1925 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1927 rtx fn_address = 0;
1928 rtx fn_reg = 0;
1929 rtx fn_address_insn = 0;
1931 first = 0;
1932 for (temp = XEXP (temp, 0); temp != p;
1933 temp = NEXT_INSN (temp))
1935 rtx body;
1936 rtx n;
1937 rtx next;
1939 if (GET_CODE (temp) == NOTE)
1940 continue;
1942 body = PATTERN (temp);
1944 /* Find the next insn after TEMP,
1945 not counting USE or NOTE insns. */
1946 for (next = NEXT_INSN (temp); next != p;
1947 next = NEXT_INSN (next))
1948 if (! (GET_CODE (next) == INSN
1949 && GET_CODE (PATTERN (next)) == USE)
1950 && GET_CODE (next) != NOTE)
1951 break;
1953 /* If that is the call, this may be the insn
1954 that loads the function address.
1956 Extract the function address from the insn
1957 that loads it into a register.
1958 If this insn was cse'd, we get incorrect code.
1960 So emit a new move insn that copies the
1961 function address into the register that the
1962 call insn will use. flow.c will delete any
1963 redundant stores that we have created. */
1964 if (GET_CODE (next) == CALL_INSN
1965 && GET_CODE (body) == SET
1966 && GET_CODE (SET_DEST (body)) == REG
1967 && (n = find_reg_note (temp, REG_EQUAL,
1968 NULL_RTX)))
1970 fn_reg = SET_SRC (body);
1971 if (GET_CODE (fn_reg) != REG)
1972 fn_reg = SET_DEST (body);
1973 fn_address = XEXP (n, 0);
1974 fn_address_insn = temp;
1976 /* We have the call insn.
1977 If it uses the register we suspect it might,
1978 load it with the correct address directly. */
1979 if (GET_CODE (temp) == CALL_INSN
1980 && fn_address != 0
1981 && reg_referenced_p (fn_reg, body))
1982 emit_insn_after (gen_move_insn (fn_reg,
1983 fn_address),
1984 fn_address_insn);
1986 if (GET_CODE (temp) == CALL_INSN)
1988 i1 = emit_call_insn_before (body, loop_start);
1989 /* Because the USAGE information potentially
1990 contains objects other than hard registers
1991 we need to copy it. */
1992 if (CALL_INSN_FUNCTION_USAGE (temp))
1993 CALL_INSN_FUNCTION_USAGE (i1)
1994 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1996 else
1997 i1 = emit_insn_before (body, loop_start);
1998 if (first == 0)
1999 first = i1;
2000 if (temp == fn_address_insn)
2001 fn_address_insn = i1;
2002 REG_NOTES (i1) = REG_NOTES (temp);
2003 delete_insn (temp);
2005 if (new_start == 0)
2006 new_start = first;
2008 if (m->savemode != VOIDmode)
2010 /* P sets REG to zero; but we should clear only
2011 the bits that are not covered by the mode
2012 m->savemode. */
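/* For instance (illustrative): if m->savemode is QImode on a 32-bit
   target, the expand_binop call below ANDs REG with (1 << 8) - 1 = 0xff,
   zeroing bits 8..31 while leaving the low byte intact.  */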
2013 rtx reg = m->set_dest;
2014 rtx sequence;
2015 rtx tem;
2017 start_sequence ();
2018 tem = expand_binop
2019 (GET_MODE (reg), and_optab, reg,
2020 GEN_INT ((((HOST_WIDE_INT) 1
2021 << GET_MODE_BITSIZE (m->savemode)))
2022 - 1),
2023 reg, 1, OPTAB_LIB_WIDEN);
2024 if (tem == 0)
2025 abort ();
2026 if (tem != reg)
2027 emit_move_insn (reg, tem);
2028 sequence = gen_sequence ();
2029 end_sequence ();
2030 i1 = emit_insn_before (sequence, loop_start);
2032 else if (GET_CODE (p) == CALL_INSN)
2034 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2035 /* Because the USAGE information potentially
2036 contains objects other than hard registers
2037 we need to copy it. */
2038 if (CALL_INSN_FUNCTION_USAGE (p))
2039 CALL_INSN_FUNCTION_USAGE (i1)
2040 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2042 else if (count == m->consec && m->move_insn_first)
2044 /* The SET_SRC might not be invariant, so we must
2045 use the REG_EQUAL note. */
2046 start_sequence ();
2047 emit_move_insn (m->set_dest, m->set_src);
2048 temp = get_insns ();
2049 end_sequence ();
2051 add_label_notes (m->set_src, temp);
2053 i1 = emit_insns_before (temp, loop_start);
2054 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2055 REG_NOTES (i1)
2056 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2057 : REG_EQUAL),
2058 m->set_src, REG_NOTES (i1));
2060 else
2061 i1 = emit_insn_before (PATTERN (p), loop_start);
2063 if (REG_NOTES (i1) == 0)
2065 REG_NOTES (i1) = REG_NOTES (p);
2067 /* If there is a REG_EQUAL note present whose value
2068 is not loop invariant, then delete it, since it
2069 may cause problems with later optimization passes.
2070 It is possible for cse to create such notes
2071 like this as a result of record_jump_cond. */
2073 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2074 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2075 remove_note (i1, temp);
2078 if (new_start == 0)
2079 new_start = i1;
2081 if (loop_dump_stream)
2082 fprintf (loop_dump_stream, " moved to %d",
2083 INSN_UID (i1));
2085 /* If library call, now fix the REG_NOTES that contain
2086 insn pointers, namely REG_LIBCALL on FIRST
2087 and REG_RETVAL on I1. */
2088 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2090 XEXP (temp, 0) = first;
2091 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2092 XEXP (temp, 0) = i1;
2095 temp = p;
2096 delete_insn (p);
2097 p = NEXT_INSN (p);
2099 /* simplify_giv_expr expects that it can walk the insns
2100 at m->insn forwards and see this old sequence we are
2101 tossing here. delete_insn does preserve the next
2102 pointers, but when we skip over a NOTE we must fix
2103 it up. Otherwise that code walks into the non-deleted
2104 insn stream. */
2105 while (p && GET_CODE (p) == NOTE)
2106 p = NEXT_INSN (temp) = NEXT_INSN (p);
2109 /* The more regs we move, the less we like moving them. */
2110 threshold -= 3;
2113 /* Any other movable that loads the same register
2114 MUST be moved. */
2115 already_moved[regno] = 1;
2117 /* This reg has been moved out of one loop. */
2118 moved_once[regno] = 1;
2120 /* The reg set here is now invariant. */
2121 if (! m->partial)
2122 VARRAY_INT (set_in_loop, regno) = 0;
2124 m->done = 1;
2126 /* Change the length-of-life info for the register
2127 to say it lives at least the full length of this loop.
2128 This will help guide optimizations in outer loops. */
2130 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2131 /* This is the old insn before all the moved insns.
2132 We can't use the moved insn because it is out of range
2133 in uid_luid. Only the old insns have luids. */
2134 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2135 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (loop_end))
2136 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2138 /* Combine with this moved insn any other matching movables. */
2140 if (! m->partial)
2141 for (m1 = movables; m1; m1 = m1->next)
2142 if (m1->match == m)
2144 rtx temp;
2146 /* Schedule the reg loaded by M1
2147 for replacement so that it shares the reg of M.
2148 If the modes differ (only possible in restricted
2149 circumstances), make a SUBREG.
2151 Note this assumes that the target dependent files
2152 treat REG and SUBREG equally, including within
2153 GO_IF_LEGITIMATE_ADDRESS and in all the
2154 predicates since we never verify that replacing the
2155 original register with a SUBREG results in a
2156 recognizable insn. */
2157 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2158 reg_map[m1->regno] = m->set_dest;
2159 else
2160 reg_map[m1->regno]
2161 = gen_lowpart_common (GET_MODE (m1->set_dest),
2162 m->set_dest);
2164 /* Get rid of the matching insn
2165 and prevent further processing of it. */
2166 m1->done = 1;
2168 /* If library call, delete all insns except the last,
2169 which is deleted below. */
2170 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2171 NULL_RTX)))
2173 for (temp = XEXP (temp, 0); temp != m1->insn;
2174 temp = NEXT_INSN (temp))
2175 delete_insn (temp);
2177 delete_insn (m1->insn);
2179 /* Any other movable that loads the same register
2180 MUST be moved. */
2181 already_moved[m1->regno] = 1;
2183 /* The reg merged here is now invariant,
2184 if the reg it matches is invariant. */
2185 if (! m->partial)
2186 VARRAY_INT (set_in_loop, m1->regno) = 0;
2189 else if (loop_dump_stream)
2190 fprintf (loop_dump_stream, "not desirable");
2192 else if (loop_dump_stream && !m->match)
2193 fprintf (loop_dump_stream, "not safe");
2195 if (loop_dump_stream)
2196 fprintf (loop_dump_stream, "\n");
2199 if (new_start == 0)
2200 new_start = loop_start;
2202 /* Go through all the instructions in the loop, making
2203 all the register substitutions scheduled in REG_MAP. */
2204 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2205 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2206 || GET_CODE (p) == CALL_INSN)
2208 replace_regs (PATTERN (p), reg_map, nregs, 0);
2209 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2210 INSN_CODE (p) = -1;
2213 /* Clean up. */
2214 free (reg_map);
2215 free (already_moved);
2218 #if 0
2219 /* Scan X and replace the address of any MEM in it with ADDR.
2220 REG is the address that MEM should have before the replacement. */
2222 static void
2223 replace_call_address (x, reg, addr)
2224 rtx x, reg, addr;
2226 register enum rtx_code code;
2227 register int i;
2228 register const char *fmt;
2230 if (x == 0)
2231 return;
2232 code = GET_CODE (x);
2233 switch (code)
2235 case PC:
2236 case CC0:
2237 case CONST_INT:
2238 case CONST_DOUBLE:
2239 case CONST:
2240 case SYMBOL_REF:
2241 case LABEL_REF:
2242 case REG:
2243 return;
2245 case SET:
2246 /* Short cut for very common case. */
2247 replace_call_address (XEXP (x, 1), reg, addr);
2248 return;
2250 case CALL:
2251 /* Short cut for very common case. */
2252 replace_call_address (XEXP (x, 0), reg, addr);
2253 return;
2255 case MEM:
2256 /* If this MEM uses a reg other than the one we expected,
2257 something is wrong. */
2258 if (XEXP (x, 0) != reg)
2259 abort ();
2260 XEXP (x, 0) = addr;
2261 return;
2263 default:
2264 break;
2267 fmt = GET_RTX_FORMAT (code);
2268 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2270 if (fmt[i] == 'e')
2271 replace_call_address (XEXP (x, i), reg, addr);
2272 else if (fmt[i] == 'E')
2274 register int j;
2275 for (j = 0; j < XVECLEN (x, i); j++)
2276 replace_call_address (XVECEXP (x, i, j), reg, addr);
2280 #endif
2282 /* Return the number of memory refs to addresses that vary
2283 in the rtx X. */
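/* Example (illustrative): for (mem:SI (plus:SI (reg:SI 100) (const_int 4)))
   this returns 0 if reg 100 is loop-invariant, and 1 if the address
   varies within the loop; MEMs nested inside a varying address each
   add to the count as the recursion descends.  */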
2285 static int
2286 count_nonfixed_reads (loop, x)
2287 const struct loop *loop;
2288 rtx x;
2290 register enum rtx_code code;
2291 register int i;
2292 register const char *fmt;
2293 int value;
2295 if (x == 0)
2296 return 0;
2298 code = GET_CODE (x);
2299 switch (code)
2301 case PC:
2302 case CC0:
2303 case CONST_INT:
2304 case CONST_DOUBLE:
2305 case CONST:
2306 case SYMBOL_REF:
2307 case LABEL_REF:
2308 case REG:
2309 return 0;
2311 case MEM:
2312 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2313 + count_nonfixed_reads (loop, XEXP (x, 0)));
2315 default:
2316 break;
2319 value = 0;
2320 fmt = GET_RTX_FORMAT (code);
2321 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2323 if (fmt[i] == 'e')
2324 value += count_nonfixed_reads (loop, XEXP (x, i));
2325 if (fmt[i] == 'E')
2327 register int j;
2328 for (j = 0; j < XVECLEN (x, i); j++)
2329 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2332 return value;
2335 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2336 `has_call', `has_volatile', and `has_tablejump' within LOOP.
2337 Set the global variables `unknown_address_altered',
2338 `unknown_constant_address_altered', and `num_mem_sets'. Also, fill
2339 in the array `loop_mems' and the list `loop_store_mems'. */
2341 static void
2342 prescan_loop (loop)
2343 struct loop *loop;
2345 register int level = 1;
2346 rtx insn;
2347 struct loop_info *loop_info = LOOP_INFO (loop);
2348 rtx start = loop->start;
2349 rtx end = loop->end;
2350 /* The label after END. Jumping here is just like falling off the
2351 end of the loop. We use next_nonnote_insn instead of next_label
2352 as a hedge against the (pathological) case where some actual insn
2353 might end up between the two. */
2354 rtx exit_target = next_nonnote_insn (end);
2356 loop_info->has_indirect_jump = indirect_jump_in_function;
2357 loop_info->has_call = 0;
2358 loop_info->has_volatile = 0;
2359 loop_info->has_tablejump = 0;
2360 loop_info->has_multiple_exit_targets = 0;
2361 loop->level = 1;
2363 unknown_address_altered = 0;
2364 unknown_constant_address_altered = 0;
2365 loop_store_mems = NULL_RTX;
2366 first_loop_store_insn = NULL_RTX;
2367 loop_mems_idx = 0;
2368 num_mem_sets = 0;
2370 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2371 insn = NEXT_INSN (insn))
2373 if (GET_CODE (insn) == NOTE)
2375 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2377 ++level;
2378 /* Count number of loops contained in this one. */
2379 loop->level++;
2381 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2383 --level;
2386 else if (GET_CODE (insn) == CALL_INSN)
2388 if (! CONST_CALL_P (insn))
2389 unknown_address_altered = 1;
2390 loop_info->has_call = 1;
2392 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2394 rtx label1 = NULL_RTX;
2395 rtx label2 = NULL_RTX;
2397 if (volatile_refs_p (PATTERN (insn)))
2398 loop_info->has_volatile = 1;
2400 if (GET_CODE (insn) == JUMP_INSN
2401 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2402 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2403 loop_info->has_tablejump = 1;
2405 note_stores (PATTERN (insn), note_addr_stored, NULL);
2406 if (! first_loop_store_insn && loop_store_mems)
2407 first_loop_store_insn = insn;
2409 if (! loop_info->has_multiple_exit_targets
2410 && GET_CODE (insn) == JUMP_INSN
2411 && GET_CODE (PATTERN (insn)) == SET
2412 && SET_DEST (PATTERN (insn)) == pc_rtx)
2414 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2416 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2417 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2419 else
2421 label1 = SET_SRC (PATTERN (insn));
2426 if (label1 && label1 != pc_rtx)
2428 if (GET_CODE (label1) != LABEL_REF)
2430 /* Something tricky. */
2431 loop_info->has_multiple_exit_targets = 1;
2432 break;
2434 else if (XEXP (label1, 0) != exit_target
2435 && LABEL_OUTSIDE_LOOP_P (label1))
2437 /* A jump outside the current loop. */
2438 loop_info->has_multiple_exit_targets = 1;
2439 break;
2443 label1 = label2;
2444 label2 = NULL_RTX;
2446 while (label1);
2449 else if (GET_CODE (insn) == RETURN)
2450 loop_info->has_multiple_exit_targets = 1;
2453 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2454 if (/* An exception thrown by a called function might land us
2455 anywhere. */
2456 ! loop_info->has_call
2457 /* We don't want loads for MEMs moved to a location before the
2458 one at which their stack memory becomes allocated. (Note
2459 that this is not a problem for malloc, etc., since those
2460 require actual function calls.) */
2461 && ! current_function_calls_alloca
2462 /* There are ways to leave the loop other than falling off the
2463 end. */
2464 && ! loop_info->has_multiple_exit_targets)
2465 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2466 insn = NEXT_INSN (insn))
2467 for_each_rtx (&insn, insert_loop_mem, 0);
2469 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2470 that loop_invariant_p and load_mems can use true_dependence
2471 to determine what is really clobbered. */
2472 if (unknown_address_altered)
2474 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2476 loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_store_mems);
2478 if (unknown_constant_address_altered)
2480 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2482 RTX_UNCHANGING_P (mem) = 1;
2483 loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_store_mems);
2487 /* LOOP->CONT_DOMINATOR is now the last label between the loop start
2488 and the continue note that is the destination of a (cond)jump after
2489 the continue note. If there is any (cond)jump between the loop start
2490 and what we have so far as LOOP->CONT_DOMINATOR that has a
2491 target between LOOP->CONT_DOMINATOR and the continue note, move
2492 LOOP->CONT_DOMINATOR forward to that label; if a jump's
2493 destination cannot be determined, clear LOOP->CONT_DOMINATOR. */
2495 static void
2496 verify_dominator (loop)
2497 struct loop *loop;
2499 rtx insn;
2501 if (! loop->cont_dominator)
2502 /* This can happen for an empty loop, e.g. in
2503 gcc.c-torture/compile/920410-2.c */
2504 return;
2505 if (loop->cont_dominator == const0_rtx)
2507 loop->cont_dominator = 0;
2508 return;
2510 for (insn = loop->start; insn != loop->cont_dominator;
2511 insn = NEXT_INSN (insn))
2513 if (GET_CODE (insn) == JUMP_INSN
2514 && GET_CODE (PATTERN (insn)) != RETURN)
2516 rtx label = JUMP_LABEL (insn);
2517 int label_luid;
2519 /* If it is not a jump we can easily understand or for
2520 which we do not have jump target information in the JUMP_LABEL
2521 field (consider ADDR_VEC and ADDR_DIFF_VEC insns), then clear
2522 LOOP->CONT_DOMINATOR. */
2523 if (! any_condjump_p (insn)
2524 || label == NULL_RTX)
2526 loop->cont_dominator = NULL_RTX;
2527 return;
2530 label_luid = INSN_LUID (label);
2531 if (label_luid < INSN_LUID (loop->cont)
2532 && (label_luid
2533 > INSN_LUID (loop->cont_dominator)))
2534 loop->cont_dominator = label;
2539 /* Scan the function looking for loops. Record the start and end of each loop.
2540 Also mark as invalid loops any loops that contain a setjmp or are branched
2541 to from outside the loop. */
2543 static void
2544 find_and_verify_loops (f, loops)
2545 rtx f;
2546 struct loops *loops;
2548 rtx insn;
2549 rtx label;
2550 int num_loops;
2551 struct loop *current_loop;
2552 struct loop *next_loop;
2553 struct loop *loop;
2555 num_loops = loops->num;
2557 compute_luids (f, NULL_RTX, 0);
2559 /* If there are jumps to undefined labels,
2560 treat them as jumps out of any/all loops.
2561 This also avoids writing past end of tables when there are no loops. */
2562 uid_loop[0] = NULL;
2564 /* Find boundaries of loops, mark which loops are contained within
2565 loops, and invalidate loops that have setjmp. */
2567 num_loops = 0;
2568 current_loop = NULL;
2569 for (insn = f; insn; insn = NEXT_INSN (insn))
2571 if (GET_CODE (insn) == NOTE)
2572 switch (NOTE_LINE_NUMBER (insn))
2574 case NOTE_INSN_LOOP_BEG:
2575 next_loop = loops->array + num_loops;
2576 next_loop->num = num_loops;
2577 num_loops++;
2578 next_loop->start = insn;
2579 next_loop->outer = current_loop;
2580 current_loop = next_loop;
2581 break;
2583 case NOTE_INSN_SETJMP:
2584 /* In this case, we must invalidate our current loop and any
2585 enclosing loop. */
2586 for (loop = current_loop; loop; loop = loop->outer)
2588 loop->invalid = 1;
2589 if (loop_dump_stream)
2590 fprintf (loop_dump_stream,
2591 "\nLoop at %d ignored due to setjmp.\n",
2592 INSN_UID (loop->start));
2594 break;
2596 case NOTE_INSN_LOOP_CONT:
2597 current_loop->cont = insn;
2598 break;
2600 case NOTE_INSN_LOOP_VTOP:
2601 current_loop->vtop = insn;
2602 break;
2604 case NOTE_INSN_LOOP_END:
2605 if (! current_loop)
2606 abort ();
2608 current_loop->end = insn;
2609 verify_dominator (current_loop);
2610 current_loop = current_loop->outer;
2611 break;
2613 default:
2614 break;
2616 /* If for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT
2617 and NOTE_INSN_LOOP_END notes, update loop->cont_dominator. */
2618 else if (GET_CODE (insn) == JUMP_INSN
2619 && GET_CODE (PATTERN (insn)) != RETURN
2620 && current_loop)
2622 rtx label = JUMP_LABEL (insn);
2624 if (! any_condjump_p (insn))
2625 label = NULL_RTX;
2627 loop = current_loop;
2630 /* First see if we care about this loop. */
2631 if (loop->cont && loop->cont_dominator != const0_rtx)
2633 /* If the jump destination is not known, invalidate
2634 loop->cont_dominator. */
2635 if (! label)
2636 loop->cont_dominator = const0_rtx;
2637 else
2638 /* Check if the destination is between loop start and
2639 cont. */
2640 if ((INSN_LUID (label)
2641 < INSN_LUID (loop->cont))
2642 && (INSN_LUID (label)
2643 > INSN_LUID (loop->start))
2644 /* And if there is no later destination already
2645 recorded. */
2646 && (! loop->cont_dominator
2647 || (INSN_LUID (label)
2648 > INSN_LUID (loop->cont_dominator))))
2649 loop->cont_dominator = label;
2651 loop = loop->outer;
2653 while (loop);
2656 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2657 enclosing loop, but this doesn't matter. */
2658 uid_loop[INSN_UID (insn)] = current_loop;
2661 /* Any loop containing a label used in an initializer must be invalidated,
2662 because it can be jumped into from anywhere. */
2664 for (label = forced_labels; label; label = XEXP (label, 1))
2666 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2667 loop; loop = loop->outer)
2668 loop->invalid = 1;
2671 /* Any loop containing a label used for an exception handler must be
2672 invalidated, because it can be jumped into from anywhere. */
2674 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2676 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2677 loop; loop = loop->outer)
2678 loop->invalid = 1;
2681 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2682 loop that it is not contained within, that loop is marked invalid.
2683 If any INSN or CALL_INSN uses a label's address, then the loop containing
2684 that label is marked invalid, because it could be jumped into from
2685 anywhere.
2687 Also look for blocks of code ending in an unconditional branch that
2688 exits the loop. If such a block is surrounded by a conditional
2689 branch around the block, move the block elsewhere (see below) and
2690 invert the jump to point to the code block. This may eliminate a
2691 label in our loop and will simplify processing by both us and a
2692 possible second cse pass. */
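/* Sketch of that transformation (labels illustrative):

       before:      if (cond) goto L;      ; P, jumps around the block
                    goto loop_exit;        ; INSN, leaves the loop
                 L: ...rest of loop...

       after:       if (!cond) goto M;     ; P, inverted to a new label
                 L: ...rest of loop...
                    ...
                    (barrier)              ; outside the loop
                 M: goto loop_exit;        ; the moved block

   so the rarely executed exit branch no longer sits inside the loop
   body.  */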
2694 for (insn = f; insn; insn = NEXT_INSN (insn))
2695 if (INSN_P (insn))
2697 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2699 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2701 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2702 if (note)
2704 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2705 loop; loop = loop->outer)
2706 loop->invalid = 1;
2710 if (GET_CODE (insn) != JUMP_INSN)
2711 continue;
2713 mark_loop_jump (PATTERN (insn), this_loop);
2715 /* See if this is an unconditional branch outside the loop. */
2716 if (this_loop
2717 && (GET_CODE (PATTERN (insn)) == RETURN
2718 || (any_uncondjump_p (insn)
2719 && onlyjump_p (insn)
2720 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2721 != this_loop)))
2722 && get_max_uid () < max_uid_for_loop)
2724 rtx p;
2725 rtx our_next = next_real_insn (insn);
2726 rtx last_insn_to_move = NEXT_INSN (insn);
2727 struct loop *dest_loop;
2728 struct loop *outer_loop = NULL;
2730 /* Go backwards until we reach the start of the loop, a label,
2731 or a JUMP_INSN. */
2732 for (p = PREV_INSN (insn);
2733 GET_CODE (p) != CODE_LABEL
2734 && ! (GET_CODE (p) == NOTE
2735 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2736 && GET_CODE (p) != JUMP_INSN;
2737 p = PREV_INSN (p))
2740 /* Check for the case where we have a jump to an inner nested
2741 loop, and do not perform the optimization in that case. */
2743 if (JUMP_LABEL (insn))
2745 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2746 if (dest_loop)
2748 for (outer_loop = dest_loop; outer_loop;
2749 outer_loop = outer_loop->outer)
2750 if (outer_loop == this_loop)
2751 break;
2755 /* Make sure that the target of P is within the current loop. */
2757 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2758 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2759 outer_loop = this_loop;
2761 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2762 we have a block of code to try to move.
2764 We look backward and then forward from the target of INSN
2765 to find a BARRIER at the same loop depth as the target.
2766 If we find such a BARRIER, we make a new label for the start
2767 of the block, invert the jump in P and point it to that label,
2768 and move the block of code to the spot we found. */
2770 if (! outer_loop
2771 && GET_CODE (p) == JUMP_INSN
2772 && JUMP_LABEL (p) != 0
2773 /* Just ignore jumps to labels that were never emitted.
2774 These always indicate compilation errors. */
2775 && INSN_UID (JUMP_LABEL (p)) != 0
2776 && any_condjump_p (p) && onlyjump_p (p)
2777 && next_real_insn (JUMP_LABEL (p)) == our_next
2778 /* If it's not safe to move the sequence, then we
2779 mustn't try. */
2780 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2781 &last_insn_to_move))
2783 rtx target
2784 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2785 struct loop *target_loop = uid_loop[INSN_UID (target)];
2786 rtx loc, loc2;
2788 for (loc = target; loc; loc = PREV_INSN (loc))
2789 if (GET_CODE (loc) == BARRIER
2790 /* Don't move things inside a tablejump. */
2791 && ((loc2 = next_nonnote_insn (loc)) == 0
2792 || GET_CODE (loc2) != CODE_LABEL
2793 || (loc2 = next_nonnote_insn (loc2)) == 0
2794 || GET_CODE (loc2) != JUMP_INSN
2795 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2796 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2797 && uid_loop[INSN_UID (loc)] == target_loop)
2798 break;
2800 if (loc == 0)
2801 for (loc = target; loc; loc = NEXT_INSN (loc))
2802 if (GET_CODE (loc) == BARRIER
2803 /* Don't move things inside a tablejump. */
2804 && ((loc2 = next_nonnote_insn (loc)) == 0
2805 || GET_CODE (loc2) != CODE_LABEL
2806 || (loc2 = next_nonnote_insn (loc2)) == 0
2807 || GET_CODE (loc2) != JUMP_INSN
2808 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2809 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2810 && uid_loop[INSN_UID (loc)] == target_loop)
2811 break;
2813 if (loc)
2815 rtx cond_label = JUMP_LABEL (p);
2816 rtx new_label = get_label_after (p);
2818 /* Ensure our label doesn't go away. */
2819 LABEL_NUSES (cond_label)++;
2821 /* Verify that uid_loop is large enough and that
2822 we can invert P. */
2823 if (invert_jump (p, new_label, 1))
2825 rtx q, r;
2827 /* If no suitable BARRIER was found, create a suitable
2828 one before TARGET. Since TARGET is a fall through
2829 path, we'll need to insert a jump around our block
2830 and add a BARRIER before TARGET.
2832 This creates an extra unconditional jump outside
2833 the loop. However, the benefits of removing rarely
2834 executed instructions from inside the loop usually
2835 outweigh the cost of the extra unconditional jump
2836 outside the loop. */
2837 if (loc == 0)
2839 rtx temp;
2841 temp = gen_jump (JUMP_LABEL (insn));
2842 temp = emit_jump_insn_before (temp, target);
2843 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2844 LABEL_NUSES (JUMP_LABEL (insn))++;
2845 loc = emit_barrier_before (target);
2848 /* Include the BARRIER after INSN and copy the
2849 block after LOC. */
2850 new_label = squeeze_notes (new_label,
2851 last_insn_to_move);
2852 reorder_insns (new_label, last_insn_to_move, loc);
2854 /* All those insns are now in TARGET_LOOP. */
2855 for (q = new_label;
2856 q != NEXT_INSN (last_insn_to_move);
2857 q = NEXT_INSN (q))
2858 uid_loop[INSN_UID (q)] = target_loop;
2860 /* The label jumped to by INSN is no longer a loop
2861 exit. Unless INSN does not have a label (e.g.,
2862 it is a RETURN insn), search loop->exit_labels
2863 to find its label_ref, and remove it. Also turn
2864 off LABEL_OUTSIDE_LOOP_P bit. */
2865 if (JUMP_LABEL (insn))
2867 for (q = 0,
2868 r = this_loop->exit_labels;
2869 r; q = r, r = LABEL_NEXTREF (r))
2870 if (XEXP (r, 0) == JUMP_LABEL (insn))
2872 LABEL_OUTSIDE_LOOP_P (r) = 0;
2873 if (q)
2874 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2875 else
2876 this_loop->exit_labels = LABEL_NEXTREF (r);
2877 break;
2880 for (loop = this_loop; loop && loop != target_loop;
2881 loop = loop->outer)
2882 loop->exit_count--;
2884 /* If we didn't find it, then something is
2885 wrong. */
2886 if (! r)
2887 abort ();
2890 /* P is now a jump outside the loop, so it must be put
2891 in loop->exit_labels, and marked as such.
2892 The easiest way to do this is to just call
2893 mark_loop_jump again for P. */
2894 mark_loop_jump (PATTERN (p), this_loop);
2896 /* If INSN now jumps to the insn after it,
2897 delete INSN. */
2898 if (JUMP_LABEL (insn) != 0
2899 && (next_real_insn (JUMP_LABEL (insn))
2900 == next_real_insn (insn)))
2901 delete_insn (insn);
2904 /* Continue the loop after where the conditional
2905 branch used to jump, since the only branch insn
2906 in the block (if it still remains) is an inter-loop
2907 branch and hence needs no processing. */
2908 insn = NEXT_INSN (cond_label);
2910 if (--LABEL_NUSES (cond_label) == 0)
2911 delete_insn (cond_label);
2913 /* This loop will be continued with NEXT_INSN (insn). */
2914 insn = PREV_INSN (insn);
2921 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2922 loops it is contained in, mark the target loop invalid.
2924 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2926 static void
2927 mark_loop_jump (x, loop)
2928 rtx x;
2929 struct loop *loop;
2931 struct loop *dest_loop;
2932 struct loop *outer_loop;
2933 int i;
2935 switch (GET_CODE (x))
2937 case PC:
2938 case USE:
2939 case CLOBBER:
2940 case REG:
2941 case MEM:
2942 case CONST_INT:
2943 case CONST_DOUBLE:
2944 case RETURN:
2945 return;
2947 case CONST:
2948 /* There could be a label reference in here. */
2949 mark_loop_jump (XEXP (x, 0), loop);
2950 return;
2952 case PLUS:
2953 case MINUS:
2954 case MULT:
2955 mark_loop_jump (XEXP (x, 0), loop);
2956 mark_loop_jump (XEXP (x, 1), loop);
2957 return;
2959 case LO_SUM:
2960 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2961 mark_loop_jump (XEXP (x, 1), loop);
2962 return;
2964 case SIGN_EXTEND:
2965 case ZERO_EXTEND:
2966 mark_loop_jump (XEXP (x, 0), loop);
2967 return;
2969 case LABEL_REF:
2970 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2972 /* Link together all labels that branch outside the loop. This
2973 is used by final_[bg]iv_value and the loop unrolling code. Also
2974 mark this LABEL_REF so we know that this branch should predict
2975 false. */
2977 /* A check to make sure the label is not in an inner nested loop,
2978 since this does not count as a loop exit. */
2979 if (dest_loop)
2981 for (outer_loop = dest_loop; outer_loop;
2982 outer_loop = outer_loop->outer)
2983 if (outer_loop == loop)
2984 break;
2986 else
2987 outer_loop = NULL;
2989 if (loop && ! outer_loop)
2991 LABEL_OUTSIDE_LOOP_P (x) = 1;
2992 LABEL_NEXTREF (x) = loop->exit_labels;
2993 loop->exit_labels = x;
2995 for (outer_loop = loop;
2996 outer_loop && outer_loop != dest_loop;
2997 outer_loop = outer_loop->outer)
2998 outer_loop->exit_count++;
3001 /* If this is inside a loop, but not in the current loop or one enclosed
3002 by it, it invalidates at least one loop. */
3004 if (! dest_loop)
3005 return;
3007 /* We must invalidate every nested loop containing the target of this
3008 label, except those that also contain the jump insn. */
3010 for (; dest_loop; dest_loop = dest_loop->outer)
3012 /* Stop when we reach a loop that also contains the jump insn. */
3013 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3014 if (dest_loop == outer_loop)
3015 return;
3017 /* If we get here, we know we need to invalidate a loop. */
3018 if (loop_dump_stream && ! dest_loop->invalid)
3019 fprintf (loop_dump_stream,
3020 "\nLoop at %d ignored due to multiple entry points.\n",
3021 INSN_UID (dest_loop->start));
3023 dest_loop->invalid = 1;
3025 return;
3027 case SET:
3028 /* If this is not setting pc, ignore. */
3029 if (SET_DEST (x) == pc_rtx)
3030 mark_loop_jump (SET_SRC (x), loop);
3031 return;
3033 case IF_THEN_ELSE:
3034 mark_loop_jump (XEXP (x, 1), loop);
3035 mark_loop_jump (XEXP (x, 2), loop);
3036 return;
3038 case PARALLEL:
3039 case ADDR_VEC:
3040 for (i = 0; i < XVECLEN (x, 0); i++)
3041 mark_loop_jump (XVECEXP (x, 0, i), loop);
3042 return;
3044 case ADDR_DIFF_VEC:
3045 for (i = 0; i < XVECLEN (x, 1); i++)
3046 mark_loop_jump (XVECEXP (x, 1, i), loop);
3047 return;
3049 default:
3050 /* Strictly speaking this is not a jump into the loop, only a possible
3051 jump out of the loop. However, we have no way to link the destination
3052 of this jump onto the list of exit labels. To be safe we mark this
3053 loop and any containing loops as invalid. */
3054 if (loop)
3056 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3058 if (loop_dump_stream && ! outer_loop->invalid)
3059 fprintf (loop_dump_stream,
3060 "\nLoop at %d ignored due to unknown exit jump.\n",
3061 INSN_UID (outer_loop->start));
3062 outer_loop->invalid = 1;
3065 return;
3069 /* Return nonzero if there is a label in the range from
3070 insn INSN to and including the insn whose luid is END.
3071 INSN must have an assigned luid (i.e., it must not have
3072 been previously created by loop.c). */
3074 static int
3075 labels_in_range_p (insn, end)
3076 rtx insn;
3077 int end;
3079 while (insn && INSN_LUID (insn) <= end)
3081 if (GET_CODE (insn) == CODE_LABEL)
3082 return 1;
3083 insn = NEXT_INSN (insn);
3086 return 0;
3089 /* Record that a memory reference X is being set. */
3091 static void
3092 note_addr_stored (x, y, data)
3093 rtx x;
3094 rtx y ATTRIBUTE_UNUSED;
3095 void *data ATTRIBUTE_UNUSED;
3097 if (x == 0 || GET_CODE (x) != MEM)
3098 return;
3100 /* Count number of memory writes.
3101 This affects heuristics in strength_reduce. */
3102 num_mem_sets++;
3104 /* BLKmode MEM means all memory is clobbered. */
3105 if (GET_MODE (x) == BLKmode)
3107 if (RTX_UNCHANGING_P (x))
3108 unknown_constant_address_altered = 1;
3109 else
3110 unknown_address_altered = 1;
3112 return;
3115 loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems);
3118 /* X is a value modified by an INSN that references a biv inside a loop
3119 exit test (i.e., X is somehow related to the value of the biv). If X
3120 is a pseudo that is used more than once, then the biv is (effectively)
3121 used more than once. DATA is really an `int *', and is set if the
3122 biv is used more than once. */
3124 static void
3125 note_set_pseudo_multiple_uses (x, y, data)
3126 rtx x;
3127 rtx y ATTRIBUTE_UNUSED;
3128 void *data;
3130 if (x == 0)
3131 return;
3133 while (GET_CODE (x) == STRICT_LOW_PART
3134 || GET_CODE (x) == SIGN_EXTRACT
3135 || GET_CODE (x) == ZERO_EXTRACT
3136 || GET_CODE (x) == SUBREG)
3137 x = XEXP (x, 0);
3139 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3140 return;
3142 /* If we do not have usage information, or if we know the register
3143 is used more than once, note that fact for check_dbra_loop. */
3144 if (REGNO (x) >= max_reg_before_loop
3145 || ! VARRAY_RTX (reg_single_usage, REGNO (x))
3146 || VARRAY_RTX (reg_single_usage, REGNO (x)) == const0_rtx)
3147 *((int *) data) = 1;
3150 /* Return nonzero if the rtx X is invariant over the current loop.
3152 The value is 2 if we refer to something only conditionally invariant.
3154 A memory ref is invariant if it is not volatile and does not conflict
3155 with anything stored in `loop_store_mems'. */
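/* For example (illustrative): (const_int 4) is invariant (value 1); a
   pseudo with a negative set_in_loop entry is only conditionally
   invariant (value 2); a pseudo set on every iteration is not
   invariant (value 0).  */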
3157 int
3158 loop_invariant_p (loop, x)
3159 const struct loop *loop;
3160 register rtx x;
3162 register int i;
3163 register enum rtx_code code;
3164 register const char *fmt;
3165 int conditional = 0;
3166 rtx mem_list_entry;
3168 if (x == 0)
3169 return 1;
3170 code = GET_CODE (x);
3171 switch (code)
3173 case CONST_INT:
3174 case CONST_DOUBLE:
3175 case SYMBOL_REF:
3176 case CONST:
3177 return 1;
3179 case LABEL_REF:
3180 /* A LABEL_REF is normally invariant, however, if we are unrolling
3181 loops, and this label is inside the loop, then it isn't invariant.
3182 This is because each unrolled copy of the loop body will have
3183 a copy of this label. If this was invariant, then an insn loading
3184 the address of this label into a register might get moved outside
3185 the loop, and then each loop body would end up using the same label.
3187 We don't know the loop bounds here though, so just fail for all
3188 labels. */
3189 if (flag_unroll_loops)
3190 return 0;
3191 else
3192 return 1;
3194 case PC:
3195 case CC0:
3196 case UNSPEC_VOLATILE:
3197 return 0;
3199 case REG:
3200 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3201 since the reg might be set by initialization within the loop. */
3203 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3204 || x == arg_pointer_rtx)
3205 && ! current_function_has_nonlocal_goto)
3206 return 1;
3208 if (LOOP_INFO (loop)->has_call
3209 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3210 return 0;
3212 if (VARRAY_INT (set_in_loop, REGNO (x)) < 0)
3213 return 2;
3215 return VARRAY_INT (set_in_loop, REGNO (x)) == 0;
3217 case MEM:
3218 /* Volatile memory references must be rejected. Do this before
3219 checking for read-only items, so that volatile read-only items
3220 will be rejected also. */
3221 if (MEM_VOLATILE_P (x))
3222 return 0;
3224 /* See if there is any dependence between a store and this load. */
3225 mem_list_entry = loop_store_mems;
3226 while (mem_list_entry)
3228 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3229 x, rtx_varies_p))
3230 return 0;
3232 mem_list_entry = XEXP (mem_list_entry, 1);
3235 /* It's not invalidated by a store in memory
3236 but we must still verify the address is invariant. */
3237 break;
3239 case ASM_OPERANDS:
3240 /* Don't mess with insns declared volatile. */
3241 if (MEM_VOLATILE_P (x))
3242 return 0;
3243 break;
3245 default:
3246 break;
3249 fmt = GET_RTX_FORMAT (code);
3250 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3252 if (fmt[i] == 'e')
3254 int tem = loop_invariant_p (loop, XEXP (x, i));
3255 if (tem == 0)
3256 return 0;
3257 if (tem == 2)
3258 conditional = 1;
3260 else if (fmt[i] == 'E')
3262 register int j;
3263 for (j = 0; j < XVECLEN (x, i); j++)
3265 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3266 if (tem == 0)
3267 return 0;
3268 if (tem == 2)
3269 conditional = 1;
3275 return 1 + conditional;
3278 /* Return nonzero if all the insns in the loop that set REG
3279 are INSN and the immediately following insns,
3280 and if each of those insns sets REG in an invariant way
3281 (not counting uses of REG in them).
3283 The value is 2 if some of these insns are only conditionally invariant.
3285 We assume that INSN itself is the first set of REG
3286 and that its source is invariant. */
3288 static int
3289 consec_sets_invariant_p (loop, reg, n_sets, insn)
3290 const struct loop *loop;
3291 int n_sets;
3292 rtx reg, insn;
3294 rtx p = insn;
3295 unsigned int regno = REGNO (reg);
3296 rtx temp;
3297 /* Number of sets we have to insist on finding after INSN. */
3298 int count = n_sets - 1;
3299 int old = VARRAY_INT (set_in_loop, regno);
3300 int value = 0;
3301 int this;
3303 /* If N_SETS hit the limit, we can't rely on its value. */
3304 if (n_sets == 127)
3305 return 0;
3307 VARRAY_INT (set_in_loop, regno) = 0;
3309 while (count > 0)
3311 register enum rtx_code code;
3312 rtx set;
3314 p = NEXT_INSN (p);
3315 code = GET_CODE (p);
3317 /* If library call, skip to end of it. */
3318 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3319 p = XEXP (temp, 0);
3321 this = 0;
3322 if (code == INSN
3323 && (set = single_set (p))
3324 && GET_CODE (SET_DEST (set)) == REG
3325 && REGNO (SET_DEST (set)) == regno)
3327 this = loop_invariant_p (loop, SET_SRC (set));
3328 if (this != 0)
3329 value |= this;
3330 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3332 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3333 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3334 notes are OK. */
3335 this = (CONSTANT_P (XEXP (temp, 0))
3336 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3337 && loop_invariant_p (loop, XEXP (temp, 0))));
3338 if (this != 0)
3339 value |= this;
3342 if (this != 0)
3343 count--;
3344 else if (code != NOTE)
3346 VARRAY_INT (set_in_loop, regno) = old;
3347 return 0;
3351 VARRAY_INT (set_in_loop, regno) = old;
3352 /* If loop_invariant_p ever returned 2, we return 2. */
3353 return 1 + (value & 2);
3356 #if 0
3357 /* I don't think this condition is sufficient to allow INSN
3358 to be moved, so we no longer test it. */
3360 /* Return 1 if all insns in the basic block of INSN and following INSN
3361 that set REG are invariant according to TABLE. */
3363 static int
3364 all_sets_invariant_p (reg, insn, table)
3365 rtx reg, insn;
3366 short *table;
3368 register rtx p = insn;
3369 register int regno = REGNO (reg);
3371 while (1)
3373 register enum rtx_code code;
3374 p = NEXT_INSN (p);
3375 code = GET_CODE (p);
3376 if (code == CODE_LABEL || code == JUMP_INSN)
3377 return 1;
3378 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3379 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3380 && REGNO (SET_DEST (PATTERN (p))) == regno)
3382 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3383 return 0;
3387 #endif /* 0 */
3389 /* Look at all uses (not sets) of registers in X. For each, if it is
3390 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3391 a different insn, set USAGE[REGNO] to const0_rtx. */
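/* Illustrative trace: USAGE[r] starts at 0; the first use seen sets it
   to that insn; a use in any other insn demotes it to const0_rtx, which
   from then on means "multiple uses".  Repeated uses within the same
   insn leave the recorded insn unchanged.  */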
3393 static void
3394 find_single_use_in_loop (insn, x, usage)
3395 rtx insn;
3396 rtx x;
3397 varray_type usage;
3399 enum rtx_code code = GET_CODE (x);
3400 const char *fmt = GET_RTX_FORMAT (code);
3401 int i, j;
3403 if (code == REG)
3404 VARRAY_RTX (usage, REGNO (x))
3405 = (VARRAY_RTX (usage, REGNO (x)) != 0
3406 && VARRAY_RTX (usage, REGNO (x)) != insn)
3407 ? const0_rtx : insn;
3409 else if (code == SET)
3411 /* Don't count SET_DEST if it is a REG; otherwise count things
3412 in SET_DEST because if a register is partially modified, it won't
3413 show up as a potential movable so we don't care how USAGE is set
3414 for it. */
3415 if (GET_CODE (SET_DEST (x)) != REG)
3416 find_single_use_in_loop (insn, SET_DEST (x), usage);
3417 find_single_use_in_loop (insn, SET_SRC (x), usage);
3419 else
3420 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3422 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3423 find_single_use_in_loop (insn, XEXP (x, i), usage);
3424 else if (fmt[i] == 'E')
3425 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3426 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3430 /* Count and record any set in X which is contained in INSN. Update
3431 MAY_NOT_MOVE and LAST_SET for any register set in X. */
3433 static void
3434 count_one_set (insn, x, may_not_move, last_set)
3435 rtx insn, x;
3436 varray_type may_not_move;
3437 rtx *last_set;
3439 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3440 /* Don't move a reg that has an explicit clobber.
3441 It's not worth the pain to try to do it correctly. */
3442 VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
3444 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3446 rtx dest = SET_DEST (x);
3447 while (GET_CODE (dest) == SUBREG
3448 || GET_CODE (dest) == ZERO_EXTRACT
3449 || GET_CODE (dest) == SIGN_EXTRACT
3450 || GET_CODE (dest) == STRICT_LOW_PART)
3451 dest = XEXP (dest, 0);
3452 if (GET_CODE (dest) == REG)
3454 register int regno = REGNO (dest);
3455 /* If this is the first setting of this reg
3456 in current basic block, and it was set before,
3457 it must be set in two basic blocks, so it cannot
3458 be moved out of the loop. */
3459 if (VARRAY_INT (set_in_loop, regno) > 0
3460 && last_set[regno] == 0)
3461 VARRAY_CHAR (may_not_move, regno) = 1;
3462 /* If this is not first setting in current basic block,
3463 see if reg was used in between previous one and this.
3464 If so, neither one can be moved. */
3465 if (last_set[regno] != 0
3466 && reg_used_between_p (dest, last_set[regno], insn))
3467 VARRAY_CHAR (may_not_move, regno) = 1;
3468 if (VARRAY_INT (set_in_loop, regno) < 127)
3469 ++VARRAY_INT (set_in_loop, regno);
3470 last_set[regno] = insn;
3475 /* Increment SET_IN_LOOP at the index of each register
3476 that is modified by an insn between FROM and TO.
3477 If the value of an element of SET_IN_LOOP becomes 127 or more,
3478 stop incrementing it, to avoid overflow.
3480 Store in SINGLE_USAGE[I] the single insn in which register I is
3481 used, if it is only used once. Otherwise, it is set to 0 (for no
3482 uses) or const0_rtx for more than one use. This parameter may be zero,
3483 in which case this processing is not done.
3485 Store in *COUNT_PTR the number of actual instructions
3486 in the loop. We use this to decide what is worth moving out. */
3488 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3489 In that case, it is the insn that last set reg n. */
3491 static void
3492 count_loop_regs_set (loop, may_not_move, single_usage, count_ptr, nregs)
3493 const struct loop *loop;
3494 varray_type may_not_move;
3495 varray_type single_usage;
3496 int *count_ptr;
3497 int nregs;
3499 register rtx *last_set = (rtx *) xcalloc (nregs, sizeof (rtx));
3500 register rtx insn;
3501 register int count = 0;
3503 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
3504 insn = NEXT_INSN (insn))
3506 if (INSN_P (insn))
3508 ++count;
3510 /* Record registers that have exactly one use. */
3511 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3513 /* Include uses in REG_EQUAL notes. */
3514 if (REG_NOTES (insn))
3515 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3517 if (GET_CODE (PATTERN (insn)) == SET
3518 || GET_CODE (PATTERN (insn)) == CLOBBER)
3519 count_one_set (insn, PATTERN (insn), may_not_move, last_set);
3520 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3522 register int i;
3523 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3524 count_one_set (insn, XVECEXP (PATTERN (insn), 0, i),
3525 may_not_move, last_set);
3529 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3530 bzero ((char *) last_set, nregs * sizeof (rtx));
3532 *count_ptr = count;
3534 /* Clean up. */
3535 free (last_set);
3538 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3539 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3540 contained in insn INSN is used by any insn that precedes INSN in
3541 cyclic order starting from the loop entry point.
3543 We don't want to use INSN_LUID here because if we restrict INSN to those
3544 that have a valid INSN_LUID, it means we cannot move an invariant out
3545 from an inner loop past two loops. */
3547 static int
3548 loop_reg_used_before_p (loop, set, insn)
3549 const struct loop *loop;
3550 rtx set, insn;
3552 rtx reg = SET_DEST (set);
3553 rtx p;
3555 /* Scan forward checking for register usage. If we hit INSN, we
3556 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3557 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3559 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3560 return 1;
3562 if (p == loop->end)
3563 p = loop->start;
3566 return 0;
3569 /* A "basic induction variable" or biv is a pseudo reg that is set
3570 (within this loop) only by incrementing or decrementing it. */
3571 /* A "general induction variable" or giv is a pseudo reg whose
3572 value is a linear function of a biv. */
3574 /* Bivs are recognized by `basic_induction_var';
3575 Givs by `general_induction_var'. */
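/* A classic example (illustrative, assuming 4-byte ints):

       for (i = 0; i < n; i++)     i is a biv: i = i + 1
         a[i] = 0;                 the address a + 4*i is a giv:
                                   mult_val 4, add_val (symbol_ref a)

   Strength reduction replaces the multiply implicit in the giv with an
   addition of 4 on each iteration.  */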
3577 /* Indexed by register number, indicates whether or not register is an
3578 induction variable, and if so what type. */
3580 varray_type reg_iv_type;
3582 /* Indexed by register number, contains pointer to `struct induction'
3583 if register is an induction variable. This holds general info for
3584 all induction variables. */
3586 varray_type reg_iv_info;
3588 /* Indexed by register number, contains pointer to `struct iv_class'
3589 if register is a basic induction variable. This holds info describing
3590 the class (a related group) of induction variables that the biv belongs
3591 to. */
3593 struct iv_class **reg_biv_class;
3595 /* The head of a list which links together (via the next field)
3596 every iv class for the current loop. */
3598 struct iv_class *loop_iv_list;
3600 /* Givs made from biv increments are always splittable for loop unrolling.
3601 Since there is no regscan info for them, we have to keep track of them
3602 separately. */
3603 unsigned int first_increment_giv, last_increment_giv;
3605 /* Communication with routines called via `note_stores'. */
3607 static rtx note_insn;
3609 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3611 static rtx addr_placeholder;
3613 /* ??? Unfinished optimizations, and possible future optimizations,
3614 for the strength reduction code. */
3616 /* ??? The interaction of biv elimination, and recognition of 'constant'
3617 bivs, may cause problems. */
3619 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3620 performance problems.
3622 Perhaps don't eliminate things that can be combined with an addressing
3623 mode. Find all givs that have the same biv, mult_val, and add_val;
3624 then for each giv, check to see if its only use dies in a following
3625 memory address. If so, generate a new memory address and check to see
3626 if it is valid. If it is valid, then store the modified memory address,
3627 otherwise, mark the giv as not done so that it will get its own iv. */
3629 /* ??? Could try to optimize branches when it is known that a biv is always
3630 positive. */
3632 /* ??? When replacing a biv in a compare insn, we should replace it with the
3633 closest giv so that an optimized branch can still be recognized by the
3634 combiner, e.g. the VAX acb insn. */
3636 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3637 was rerun in loop_optimize whenever a register was added or moved.
3638 Also, some of the optimizations could be a little less conservative. */
3640 /* Scan the loop body and call FNCALL for each insn.  In addition to the
3641 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3642 callback.
3644 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
3645 for every loop iteration except for the last one.
3647 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
3648 every loop iteration.
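/* A minimal callback sketch (hypothetical; the real callbacks are
   check_insn_for_bivs and check_insn_for_givs below).  A callback
   receives each insn with the two execution flags and returns the
   insn from which scanning should continue, so it may skip ahead
   past insns it has already consumed:

	static rtx
	example_callback (loop, p, not_every_iteration, maybe_multiple)
	     struct loop *loop;
	     rtx p;
	     int not_every_iteration;
	     int maybe_multiple;
	{
	  ... examine P, possibly consuming consecutive insns ...
	  return p;
	}
*/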
3650 void
3651 for_each_insn_in_loop (loop, fncall)
3652 struct loop *loop;
3653 loop_insn_callback fncall;
3655 /* This is 1 if current insn is not executed at least once for every loop
3656 iteration. */
3657 int not_every_iteration = 0;
3658 int maybe_multiple = 0;
3659 int past_loop_latch = 0;
3660 int loop_depth = 0;
3661 rtx p;
3663 /* If LOOP->scan_start points to the loop exit test, we have to be wary of
3664 subversive use of gotos inside expression statements. */
3665 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3666 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3668 /* Scan through loop to find all possible bivs. */
3670 for (p = next_insn_in_loop (loop, loop->scan_start);
3671 p != NULL_RTX;
3672 p = next_insn_in_loop (loop, p))
3674 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3676 /* Past CODE_LABEL, we get to insns that may be executed multiple
3677 times. The only way we can be sure that they can't is if every
3678 jump insn between here and the end of the loop either
3679 returns, exits the loop, is a jump to a location that is still
3680 behind the label, or is a jump to the loop start. */
3682 if (GET_CODE (p) == CODE_LABEL)
3684 rtx insn = p;
3686 maybe_multiple = 0;
3688 while (1)
3690 insn = NEXT_INSN (insn);
3691 if (insn == loop->scan_start)
3692 break;
3693 if (insn == loop->end)
3695 if (loop->top != 0)
3696 insn = loop->top;
3697 else
3698 break;
3699 if (insn == loop->scan_start)
3700 break;
3703 if (GET_CODE (insn) == JUMP_INSN
3704 && GET_CODE (PATTERN (insn)) != RETURN
3705 && (!any_condjump_p (insn)
3706 || (JUMP_LABEL (insn) != 0
3707 && JUMP_LABEL (insn) != loop->scan_start
3708 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3710 maybe_multiple = 1;
3711 break;
3716 /* Past a jump, we get to insns for which we can't count
3717 on whether they will be executed during each iteration. */
3718 /* This code appears twice in strength_reduce. There is also similar
3719 code in scan_loop. */
3720 if (GET_CODE (p) == JUMP_INSN
3721 /* If we enter the loop in the middle, and scan around to the
3722 beginning, don't set not_every_iteration for that.
3723 This can be any kind of jump, since we want to know if insns
3724 will be executed if the loop is executed. */
3725 && !(JUMP_LABEL (p) == loop->top
3726 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3727 && any_uncondjump_p (p))
3728 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3730 rtx label = 0;
3732 /* If this is a jump outside the loop, then it also doesn't
3733 matter. Check to see if the target of this branch is on the
3734 loop->exit_labels list. */
3736 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3737 if (XEXP (label, 0) == JUMP_LABEL (p))
3738 break;
3740 if (!label)
3741 not_every_iteration = 1;
3744 else if (GET_CODE (p) == NOTE)
3746 /* At the virtual top of a converted loop, insns are again known to
3747 be executed each iteration: logically, the loop begins here
3748 even though the exit code has been duplicated.
3750 Insns are also again known to be executed each iteration at
3751 the LOOP_CONT note. */
3752 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3753 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3754 && loop_depth == 0)
3755 not_every_iteration = 0;
3756 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3757 loop_depth++;
3758 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3759 loop_depth--;
3762 /* Note if we pass a loop latch. If we do, then we can not clear
3763 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3764 a loop since a jump before the last CODE_LABEL may have started
3765 a new loop iteration.
3767 Note that LOOP_TOP is only set for rotated loops and we need
3768 this check for all loops, so compare against the CODE_LABEL
3769 which immediately follows LOOP_START. */
3770 if (GET_CODE (p) == JUMP_INSN
3771 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3772 past_loop_latch = 1;
3774 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3775 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3776 or not an insn is known to be executed each iteration of the
3777 loop, whether or not any iterations are known to occur.
3779 Therefore, if we have just passed a label and have no more labels
3780 between here and the test insn of the loop, and we have not passed
3781 a jump to the top of the loop, then we know these insns will be
3782 executed each iteration. */
3784 if (not_every_iteration
3785 && !past_loop_latch
3786 && GET_CODE (p) == CODE_LABEL
3787 && no_labels_between_p (p, loop->end)
3788 && loop_insn_first_p (p, loop->cont))
3789 not_every_iteration = 0;
3793 /* Perform strength reduction and induction variable elimination.
3795 Pseudo registers created during this function will be beyond the last
3796 valid index in several tables including n_times_set and regno_last_uid.
3797 This does not cause a problem here, because the added registers cannot be
3798 givs outside of their loop, and hence will never be reconsidered.
3799 But scan_loop must check regnos to make sure they are in bounds. */
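/* An illustrative sketch of the overall transformation (hypothetical
   source, not part of this file):

	before:				after strength reduction:
	for (i = 0; i < n; i++)		for (i = 0, p = a; i < n; i++, p += 4)
	  ... a[i] ...			  ... *p ...

   The multiply implicit in a[i] (a + i * 4, assuming 4-byte elements)
   is replaced by the reduced register p, which is incremented in step
   with the biv i.  */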
3801 static void
3802 strength_reduce (loop, insn_count, flags)
3803 struct loop *loop;
3804 int insn_count;
3805 int flags;
3807 rtx p;
3808 /* Temporary list pointers for traversing loop_iv_list. */
3809 struct iv_class *bl, **backbl;
3810 struct loop_info *loop_info = LOOP_INFO (loop);
3811 /* Ratio of extra register life span we can justify
3812 for saving an instruction. More if loop doesn't call subroutines
3813 since in that case saving an insn makes more difference
3814 and more registers are available. */
3815 /* ??? could set this to last value of threshold in move_movables */
3816 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3817 /* Map of pseudo-register replacements. */
3818 rtx *reg_map = NULL;
3819 int reg_map_size;
3820 int call_seen;
3821 rtx test;
3822 rtx end_insert_before;
3823 int n_extra_increment;
3824 int unrolled_insn_copies = 0;
3825 rtx loop_start = loop->start;
3826 rtx loop_end = loop->end;
3827 rtx loop_scan_start = loop->scan_start;
3829 VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
3830 VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
3831 reg_biv_class = (struct iv_class **)
3832 xcalloc (max_reg_before_loop, sizeof (struct iv_class *));
3834 loop_iv_list = 0;
3835 addr_placeholder = gen_reg_rtx (Pmode);
3837 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3838 must be put before this insn, so that they will appear in the right
3839 order (i.e. loop order).
3841 If loop_end is the end of the current function, then emit a
3842 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3843 dummy note insn. */
3844 if (NEXT_INSN (loop_end) != 0)
3845 end_insert_before = NEXT_INSN (loop_end);
3846 else
3847 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3849 for_each_insn_in_loop (loop, check_insn_for_bivs);
3851 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3852 Make a sanity check against n_times_set. */
3853 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3855 if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3856 /* Above happens if register modified by subreg, etc. */
3857 /* Make sure it is not recognized as a basic induction var: */
3858 || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
3859 /* If never incremented, it is an invariant that we decided not to
3860 move. So leave it alone. */
3861 || ! bl->incremented)
3863 if (loop_dump_stream)
3864 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3865 bl->regno,
3866 (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3867 ? "not induction variable"
3868 : (! bl->incremented ? "never incremented"
3869 : "count error")));
3871 REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT;
3872 *backbl = bl->next;
3874 else
3876 backbl = &bl->next;
3878 if (loop_dump_stream)
3879 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3883 /* Exit if there are no bivs. */
3884 if (! loop_iv_list)
3886 /* We can still unroll the loop anyway, but indicate that there is no
3887 strength reduction info available. */
3888 if (flags & LOOP_UNROLL)
3889 unroll_loop (loop, insn_count, end_insert_before, 0);
3891 goto egress;
3894 /* Find initial value for each biv by searching backwards from loop_start,
3895 halting at first label. Also record any test condition. */
3897 call_seen = 0;
3898 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3900 note_insn = p;
3902 if (GET_CODE (p) == CALL_INSN)
3903 call_seen = 1;
3905 if (INSN_P (p))
3906 note_stores (PATTERN (p), record_initial, NULL);
3908 /* Record any test of a biv that branches around the loop if there is no
3909 store between it and the start of the loop.  We only care about tests with
3910 constants and registers and only certain of those. */
3911 if (GET_CODE (p) == JUMP_INSN
3912 && JUMP_LABEL (p) != 0
3913 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3914 && (test = get_condition_for_loop (loop, p)) != 0
3915 && GET_CODE (XEXP (test, 0)) == REG
3916 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3917 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3918 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3919 && bl->init_insn == 0)
3921 /* If an NE test, we have an initial value! */
3922 if (GET_CODE (test) == NE)
3924 bl->init_insn = p;
3925 bl->init_set = gen_rtx_SET (VOIDmode,
3926 XEXP (test, 0), XEXP (test, 1));
3928 else
3929 bl->initial_test = test;
3933 /* Look at each biv and see if we can say anything better about its
3934 initial value from any initializing insns set up above. (This is done
3935 in two passes to avoid missing SETs in a PARALLEL.) */
3936 for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next)
3938 rtx src;
3939 rtx note;
3941 if (! bl->init_insn)
3942 continue;
3944 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3945 is a constant, use the value of that. */
3946 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3947 && CONSTANT_P (XEXP (note, 0)))
3948 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3949 && CONSTANT_P (XEXP (note, 0))))
3950 src = XEXP (note, 0);
3951 else
3952 src = SET_SRC (bl->init_set);
3954 if (loop_dump_stream)
3955 fprintf (loop_dump_stream,
3956 "Biv %d initialized at insn %d: initial value ",
3957 bl->regno, INSN_UID (bl->init_insn));
3959 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3960 || GET_MODE (src) == VOIDmode)
3961 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3963 bl->initial_value = src;
3965 if (loop_dump_stream)
3967 if (GET_CODE (src) == CONST_INT)
3969 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3970 fputc ('\n', loop_dump_stream);
3972 else
3974 print_rtl (loop_dump_stream, src);
3975 fprintf (loop_dump_stream, "\n");
3979 else
3981 struct iv_class *bl2 = 0;
3982 rtx increment = NULL_RTX;
3984 /* Biv initial value is not a simple move. If it is the sum of
3985 another biv and a constant, check if both bivs are incremented
3986 in lockstep. Then we are actually looking at a giv.
3987 For simplicity, we only handle the case where there is but a
3988 single increment, and the register is not used elsewhere. */
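/* Sketch: with  j = i + 4  before the loop and both i and j
   incremented by the same constant inside it, j is not an
   independent biv but really the giv  i + 4.  */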
3989 if (bl->biv_count == 1
3990 && bl->regno < max_reg_before_loop
3991 && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
3992 && GET_CODE (src) == PLUS
3993 && GET_CODE (XEXP (src, 0)) == REG
3994 && CONSTANT_P (XEXP (src, 1))
3995 && ((increment = biv_total_increment (bl)) != NULL_RTX))
3997 unsigned int regno = REGNO (XEXP (src, 0));
3999 for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
4000 if (bl2->regno == regno)
4001 break;
4004 /* Now, can we transform this biv into a giv? */
4005 if (bl2
4006 && bl2->biv_count == 1
4007 && rtx_equal_p (increment, biv_total_increment (bl2))
4008 /* init_insn is only set to insns that are before loop_start
4009 without any intervening labels. */
4010 && ! reg_set_between_p (bl2->biv->src_reg,
4011 PREV_INSN (bl->init_insn), loop_start)
4012 /* The register from BL2 must be set before the register from
4013 BL is set, or we must be able to move the latter set after
4014 the former set. Currently there can't be any labels
4015 in-between when biv_total_increment returns nonzero both times
4016 but we test it here in case some day some real cfg analysis
4017 gets used to set always_computable. */
4018 && (loop_insn_first_p (bl2->biv->insn, bl->biv->insn)
4019 ? no_labels_between_p (bl2->biv->insn, bl->biv->insn)
4020 : (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn,
4021 bl2->biv->insn)
4022 && no_jumps_between_p (bl->biv->insn, bl2->biv->insn)))
4023 && validate_change (bl->biv->insn,
4024 &SET_SRC (single_set (bl->biv->insn)),
4025 copy_rtx (src), 0))
4027 rtx dominator = loop->cont_dominator;
4028 rtx giv = bl->biv->src_reg;
4029 rtx giv_insn = bl->biv->insn;
4030 rtx after_giv = NEXT_INSN (giv_insn);
4032 if (loop_dump_stream)
4033 fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno);
4034 /* Let this giv be discovered by the generic code. */
4035 REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT;
4036 reg_biv_class[bl->regno] = (struct iv_class *) NULL_PTR;
4037 /* We can get better optimization if we can move the giv setting
4038 before the first giv use. */
4039 if (dominator
4040 && ! loop_insn_first_p (dominator, loop_scan_start)
4041 && ! reg_set_between_p (bl2->biv->src_reg, loop_start,
4042 dominator)
4043 && ! reg_used_between_p (giv, loop_start, dominator)
4044 && ! reg_used_between_p (giv, giv_insn, loop_end))
4046 rtx p;
4047 rtx next;
4049 for (next = NEXT_INSN (dominator);; next = NEXT_INSN (next))
4051 if (GET_CODE (next) == JUMP_INSN
4052 || (INSN_P (next)
4053 && insn_dependent_p (giv_insn, next)))
4054 break;
4055 #ifdef HAVE_cc0
4056 if (! INSN_P (next) || ! sets_cc0_p (PATTERN (next)))
4057 #endif
4058 dominator = next;
4060 if (loop_dump_stream)
4061 fprintf (loop_dump_stream, "move after insn %d\n",
4062 INSN_UID (dominator));
4063 /* Avoid problems with luids by actually moving the insn
4064 and adjusting all luids in the range. */
4065 reorder_insns (giv_insn, giv_insn, dominator);
4066 for (p = dominator; INSN_UID (p) >= max_uid_for_loop;)
4067 p = PREV_INSN (p);
4068 compute_luids (giv_insn, after_giv, INSN_LUID (p));
4069 /* If the only purpose of the init insn is to initialize
4070 this giv, delete it. */
4071 if (single_set (bl->init_insn)
4072 && ! reg_used_between_p (giv, bl->init_insn, loop_start))
4073 delete_insn (bl->init_insn);
4075 else if (! loop_insn_first_p (bl2->biv->insn, bl->biv->insn))
4077 rtx p = PREV_INSN (giv_insn);
4078 while (INSN_UID (p) >= max_uid_for_loop)
4079 p = PREV_INSN (p);
4080 reorder_insns (giv_insn, giv_insn, bl2->biv->insn);
4081 compute_luids (after_giv, NEXT_INSN (giv_insn),
4082 INSN_LUID (p));
4084 /* Remove this biv from the chain. */
4085 *backbl = bl->next;
4088 /* If we can't make it a giv,
4089 let the biv keep its initial value of "itself". */
4090 else if (loop_dump_stream)
4091 fprintf (loop_dump_stream, "is complex\n");
4095 /* If a biv is unconditionally incremented several times in a row, convert
4096 all but the last increment into a giv. */
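/* Sketch of that conversion (hypothetical registers):

	before:			after:
	i = i + 4;		i2 = i + 4;	(new giv)
	... uses of i ...	... uses of i2 ...
	i = i + 4;		i = i + 8;	(single biv increment)

   The intermediate uses are rewritten to use the new giv i2, leaving
   one biv increment per iteration.  */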
4098 /* Get an upper bound for the number of registers
4099 we might have after all bivs have been processed. */
4100 first_increment_giv = max_reg_num ();
4101 for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next)
4102 n_extra_increment += bl->biv_count - 1;
4104 /* If the loop contains volatile memory references, do not allow any
4105 replacements to take place, since this could lose the volatile
4106 markers. */
4107 if (n_extra_increment && ! loop_info->has_volatile)
4109 unsigned int nregs = first_increment_giv + n_extra_increment;
4111 /* Reallocate reg_iv_type and reg_iv_info. */
4112 VARRAY_GROW (reg_iv_type, nregs);
4113 VARRAY_GROW (reg_iv_info, nregs);
4115 for (bl = loop_iv_list; bl; bl = bl->next)
4117 struct induction **vp, *v, *next;
4118 int biv_dead_after_loop = 0;
4120 /* The biv increment lists are in reverse order.  Fix this
4121 first. */
4122 for (v = bl->biv, bl->biv = 0; v; v = next)
4124 next = v->next_iv;
4125 v->next_iv = bl->biv;
4126 bl->biv = v;
4129 /* We must guard against the case that an early exit between v->insn
4130 and next->insn leaves the biv live after the loop, since that
4131 would mean that we'd be missing an increment for the final
4132 value. The following test to set biv_dead_after_loop is like
4133 the first part of the test to set bl->eliminable.
4134 We don't check here if we can calculate the final value, since
4135 this can't succeed if we already know that there is a jump
4136 between v->insn and next->insn, yet next->always_executed is
4137 set and next->maybe_multiple is cleared. Such a combination
4138 implies that the jump destination is outside the loop.
4139 If we want to make this check more sophisticated, we should
4140 check each branch between v->insn and next->insn individually
4141 to see if the biv is dead at its destination. */
4143 if (uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4144 && bl->init_insn
4145 && INSN_UID (bl->init_insn) < max_uid_for_loop
4146 && (uid_luid[REGNO_FIRST_UID (bl->regno)]
4147 >= INSN_LUID (bl->init_insn))
4148 #ifdef HAVE_decrement_and_branch_until_zero
4149 && ! bl->nonneg
4150 #endif
4151 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4152 biv_dead_after_loop = 1;
4154 for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;)
4156 HOST_WIDE_INT offset;
4157 rtx set, add_val, old_reg, dest_reg, last_use_insn, note;
4158 int old_regno, new_regno;
4159 rtx next_loc_insn;
4161 if (! v->always_executed
4162 || v->maybe_multiple
4163 || GET_CODE (v->add_val) != CONST_INT
4164 || ! next->always_executed
4165 || next->maybe_multiple
4166 || ! CONSTANT_P (next->add_val)
4167 || v->mult_val != const1_rtx
4168 || next->mult_val != const1_rtx
4169 || ! (biv_dead_after_loop
4170 || no_jumps_between_p (v->insn, next->insn)))
4172 vp = &v->next_iv;
4173 continue;
4175 offset = INTVAL (v->add_val);
4176 set = single_set (v->insn);
4177 add_val = plus_constant (next->add_val, offset);
4178 old_reg = v->dest_reg;
4179 dest_reg = gen_reg_rtx (v->mode);
4181 /* Unlike reg_iv_type / reg_iv_info, the other three arrays
4182 have been allocated with some slop space, so we may not
4183 actually need to reallocate them. If we do, the following
4184 if statement will be executed just once in this loop. */
4185 if ((unsigned) max_reg_num () > n_times_set->num_elements)
4187 /* Grow all the remaining arrays. */
4188 VARRAY_GROW (set_in_loop, nregs);
4189 VARRAY_GROW (n_times_set, nregs);
4190 VARRAY_GROW (may_not_optimize, nregs);
4191 VARRAY_GROW (reg_single_usage, nregs);
4194 /* Some bivs are incremented with a multi-insn sequence.
4195 The first insn contains the add. */
4196 next_loc_insn = next->insn;
4197 while (NOTE_P (next_loc_insn)
4198 || ! loc_mentioned_in_p (next->location,
4199 PATTERN (next_loc_insn)))
4200 next_loc_insn = PREV_INSN (next_loc_insn);
4202 if (next_loc_insn == v->insn)
4203 abort ();
4205 if (! validate_change (next_loc_insn, next->location, add_val, 0))
4207 vp = &v->next_iv;
4208 continue;
4211 /* Here we can try to eliminate the increment by combining
4212 it into the uses. */
4214 /* Set last_use_insn so that we can check against it. */
4216 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4217 p != next_loc_insn;
4218 p = next_insn_in_loop (loop, p))
4220 if (!INSN_P (p))
4221 continue;
4222 if (reg_mentioned_p (old_reg, PATTERN (p)))
4224 last_use_insn = p;
4228 /* If we can't get the LUIDs for the insns, we can't
4229 calculate the lifetime. This is likely from unrolling
4230 of an inner loop, so there is little point in making this
4231 a DEST_REG giv anyway.  */
4232 if (INSN_UID (v->insn) >= max_uid_for_loop
4233 || INSN_UID (last_use_insn) >= max_uid_for_loop
4234 || ! validate_change (v->insn, &SET_DEST (set), dest_reg, 0))
4236 /* Change the increment at NEXT back to what it was. */
4237 if (! validate_change (next_loc_insn, next->location,
4238 next->add_val, 0))
4239 abort ();
4240 vp = &v->next_iv;
4241 continue;
4243 next->add_val = add_val;
4244 v->dest_reg = dest_reg;
4245 v->giv_type = DEST_REG;
4246 v->location = &SET_SRC (set);
4247 v->cant_derive = 0;
4248 v->combined_with = 0;
4249 v->maybe_dead = 0;
4250 v->derive_adjustment = 0;
4251 v->same = 0;
4252 v->ignore = 0;
4253 v->new_reg = 0;
4254 v->final_value = 0;
4255 v->same_insn = 0;
4256 v->auto_inc_opt = 0;
4257 v->unrolled = 0;
4258 v->shared = 0;
4259 v->derived_from = 0;
4260 v->always_computable = 1;
4261 v->always_executed = 1;
4262 v->replaceable = 1;
4263 v->no_const_addval = 0;
4265 old_regno = REGNO (old_reg);
4266 new_regno = REGNO (dest_reg);
4267 VARRAY_INT (set_in_loop, old_regno)--;
4268 VARRAY_INT (set_in_loop, new_regno) = 1;
4269 VARRAY_INT (n_times_set, old_regno)--;
4270 VARRAY_INT (n_times_set, new_regno) = 1;
4271 VARRAY_CHAR (may_not_optimize, new_regno) = 0;
4273 REG_IV_TYPE (new_regno) = GENERAL_INDUCT;
4274 REG_IV_INFO (new_regno) = v;
4276 /* If next->insn has a REG_EQUAL note that mentions OLD_REG,
4277 it must be replaced. */
4278 note = find_reg_note (next->insn, REG_EQUAL, NULL_RTX);
4279 if (note && reg_mentioned_p (old_reg, XEXP (note, 0)))
4280 XEXP (note, 0) = copy_rtx (SET_SRC (single_set (next->insn)));
4282 /* Remove the increment from the list of biv increments,
4283 and record it as a giv. */
4284 *vp = next;
4285 bl->biv_count--;
4286 v->next_iv = bl->giv;
4287 bl->giv = v;
4288 bl->giv_count++;
4289 v->benefit = rtx_cost (SET_SRC (set), SET);
4290 bl->total_benefit += v->benefit;
4292 /* Now replace the biv with DEST_REG in all insns between
4293 the replaced increment and the next increment, and
4294 remember the last insn that needed a replacement. */
4295 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4296 p != next_loc_insn;
4297 p = next_insn_in_loop (loop, p))
4299 rtx note;
4301 if (! INSN_P (p))
4302 continue;
4303 if (reg_mentioned_p (old_reg, PATTERN (p)))
4305 last_use_insn = p;
4306 if (! validate_replace_rtx (old_reg, dest_reg, p))
4307 abort ();
4309 for (note = REG_NOTES (p); note; note = XEXP (note, 1))
4311 if (GET_CODE (note) == EXPR_LIST)
4312 XEXP (note, 0)
4313 = replace_rtx (XEXP (note, 0), old_reg, dest_reg);
4317 v->last_use = last_use_insn;
4318 v->lifetime = INSN_LUID (last_use_insn) - INSN_LUID (v->insn);
4319 /* If the lifetime is zero, it means that this register is really
4320 a dead store. So mark this as a giv that can be ignored.
4321 This will not prevent the biv from being eliminated. */
4322 if (v->lifetime == 0)
4323 v->ignore = 1;
4325 if (loop_dump_stream)
4326 fprintf (loop_dump_stream,
4327 "Increment %d of biv %d converted to giv %d.\n\n",
4328 INSN_UID (v->insn), old_regno, new_regno);
4332 last_increment_giv = max_reg_num () - 1;
4334 /* Search the loop for general induction variables. */
4336 for_each_insn_in_loop (loop, check_insn_for_givs);
4338 /* Try to calculate and save the number of loop iterations. This is
4339 set to zero if the actual number can not be calculated. This must
4340 be called after all giv's have been identified, since otherwise it may
4341 fail if the iteration variable is a giv. */
4343 loop_iterations (loop);
4345 /* Now for each giv for which we still don't know whether or not it is
4346 replaceable, check to see if it is replaceable because its final value
4347 can be calculated. This must be done after loop_iterations is called,
4348 so that final_giv_value will work correctly. */
4350 for (bl = loop_iv_list; bl; bl = bl->next)
4352 struct induction *v;
4354 for (v = bl->giv; v; v = v->next_iv)
4355 if (! v->replaceable && ! v->not_replaceable)
4356 check_final_value (loop, v);
4359 /* Try to prove that the loop counter variable (if any) is always
4360 nonnegative; if so, record that fact with a REG_NONNEG note
4361 so that "decrement and branch until zero" insn can be used. */
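/* Sketch: an upward-counting loop such as  for (i = 0; i < n; i++)
   may be reversed to count down from n, so the exit test becomes
   --i != 0 and a single decrement-and-branch insn can implement it
   (assuming i's value is not otherwise needed).  */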
4362 check_dbra_loop (loop, insn_count);
4364 /* Create reg_map to hold substitutions for replaceable giv regs.
4365 Some givs might have been made from biv increments, so look at
4366 reg_iv_type for a suitable size. */
4367 reg_map_size = reg_iv_type->num_elements;
4368 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4370 /* Examine each iv class for feasibility of strength reduction/induction
4371 variable elimination. */
4373 for (bl = loop_iv_list; bl; bl = bl->next)
4375 struct induction *v;
4376 int benefit;
4377 int all_reduced;
4378 rtx final_value = 0;
4379 unsigned int nregs;
4381 /* Test whether it will be possible to eliminate this biv
4382 provided all givs are reduced. This is possible if either
4383 the reg is not used outside the loop, or we can compute
4384 what its final value will be.
4386 For architectures with a decrement_and_branch_until_zero insn,
4387 don't do this if we put a REG_NONNEG note on the endtest for
4388 this biv. */
4390 /* Compare against bl->init_insn rather than loop_start.
4391 We aren't concerned with any uses of the biv between
4392 init_insn and loop_start since these won't be affected
4393 by the value of the biv elsewhere in the function, so
4394 long as init_insn doesn't use the biv itself.
4395 March 14, 1989 -- self@bayes.arc.nasa.gov */
4397 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4398 && bl->init_insn
4399 && INSN_UID (bl->init_insn) < max_uid_for_loop
4400 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4401 #ifdef HAVE_decrement_and_branch_until_zero
4402 && ! bl->nonneg
4403 #endif
4404 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4405 || ((final_value = final_biv_value (loop, bl))
4406 #ifdef HAVE_decrement_and_branch_until_zero
4407 && ! bl->nonneg
4408 #endif
4410 bl->eliminable = maybe_eliminate_biv (loop, bl, 0, threshold,
4411 insn_count);
4412 else
4414 if (loop_dump_stream)
4416 fprintf (loop_dump_stream,
4417 "Cannot eliminate biv %d.\n",
4418 bl->regno);
4419 fprintf (loop_dump_stream,
4420 "First use: insn %d, last use: insn %d.\n",
4421 REGNO_FIRST_UID (bl->regno),
4422 REGNO_LAST_UID (bl->regno));
4426 /* Combine all giv's for this iv_class. */
4427 combine_givs (bl);
4429 /* This will be true at the end, if all givs which depend on this
4430 biv have been strength reduced.
4431 We can't (currently) eliminate the biv unless this is so. */
4432 all_reduced = 1;
4434 /* Check each giv in this class to see if we will benefit by reducing
4435 it. Skip giv's combined with others. */
4436 for (v = bl->giv; v; v = v->next_iv)
4438 struct induction *tv;
4440 if (v->ignore || v->same)
4441 continue;
4443 benefit = v->benefit;
4445 /* Reduce benefit if not replaceable, since we will insert
4446 a move-insn to replace the insn that calculates this giv.
4447 Don't do this unless the giv is a user variable, since it
4448 will often be marked non-replaceable because of the duplication
4449 of the exit code outside the loop. In such a case, the copies
4450 we insert are dead and will be deleted. So they don't have
4451 a cost. Similar situations exist. */
4452 /* ??? The new final_[bg]iv_value code does a much better job
4453 of finding replaceable giv's, and hence this code may no longer
4454 be necessary. */
4455 if (! v->replaceable && ! bl->eliminable
4456 && REG_USERVAR_P (v->dest_reg))
4457 benefit -= copy_cost;
4459 /* Decrease the benefit to count the add-insns that we will
4460 insert to increment the reduced reg for the giv. */
4461 benefit -= add_cost * bl->biv_count;
4463 /* Decide whether to strength-reduce this giv or to leave the code
4464 unchanged (recompute it from the biv each time it is used).
4465 This decision can be made independently for each giv. */
4467 #ifdef AUTO_INC_DEC
4468 /* Attempt to guess whether autoincrement will handle some of the
4469 new add insns; if so, increase BENEFIT (undo the subtraction of
4470 add_cost that was done above). */
4471 if (v->giv_type == DEST_ADDR
4472 && GET_CODE (v->mult_val) == CONST_INT)
4474 if (HAVE_POST_INCREMENT
4475 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4476 benefit += add_cost * bl->biv_count;
4477 else if (HAVE_PRE_INCREMENT
4478 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4479 benefit += add_cost * bl->biv_count;
4480 else if (HAVE_POST_DECREMENT
4481 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4482 benefit += add_cost * bl->biv_count;
4483 else if (HAVE_PRE_DECREMENT
4484 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4485 benefit += add_cost * bl->biv_count;
4487 #endif
4489 /* If an insn is not to be strength reduced, then set its ignore
4490 flag, and clear all_reduced. */
4492 /* A giv that depends on a reversed biv must be reduced if it is
4493 used after the loop exit; otherwise, it would have the wrong
4494 value after the loop exit.  To make it simple, just reduce all
4495 such givs whether or not we know they are used after the loop
4496 exit. */
4498 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4499 && ! bl->reversed )
4501 if (loop_dump_stream)
4502 fprintf (loop_dump_stream,
4503 "giv of insn %d not worth while, %d vs %d.\n",
4504 INSN_UID (v->insn),
4505 v->lifetime * threshold * benefit, insn_count);
4506 v->ignore = 1;
4507 all_reduced = 0;
4509 else
4511 /* Check that we can increment the reduced giv without a
4512 multiply insn. If not, reject it. */
4514 for (tv = bl->biv; tv; tv = tv->next_iv)
4515 if (tv->mult_val == const1_rtx
4516 && ! product_cheap_p (tv->add_val, v->mult_val))
4518 if (loop_dump_stream)
4519 fprintf (loop_dump_stream,
4520 "giv of insn %d: would need a multiply.\n",
4521 INSN_UID (v->insn));
4522 v->ignore = 1;
4523 all_reduced = 0;
4524 break;
4529 /* Check for givs whose first use is their definition and whose
4530 last use is the definition of another giv. If so, it is likely
4531 dead and should not be used to derive another giv nor to
4532 eliminate a biv. */
4533 for (v = bl->giv; v; v = v->next_iv)
4535 if (v->ignore
4536 || (v->same && v->same->ignore))
4537 continue;
4539 if (v->last_use)
4541 struct induction *v1;
4543 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4544 if (v->last_use == v1->insn)
4545 v->maybe_dead = 1;
4547 else if (v->giv_type == DEST_REG
4548 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4550 struct induction *v1;
4552 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4553 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4554 v->maybe_dead = 1;
4558 /* Now that we know which givs will be reduced, try to rearrange the
4559 combinations to reduce register pressure.
4560 recombine_givs calls find_life_end, which needs reg_iv_type and
4561 reg_iv_info to be valid for all pseudos. We do the necessary
4562 reallocation here since it allows us to check if there are still
4563 more bivs to process. */
4564 nregs = max_reg_num ();
4565 if (nregs > reg_iv_type->num_elements)
4567 /* If there are still more bivs to process, allocate some slack
4568 space so that we're not constantly reallocating these arrays. */
4569 if (bl->next)
4570 nregs += nregs / 4;
4571 /* Reallocate reg_iv_type and reg_iv_info. */
4572 VARRAY_GROW (reg_iv_type, nregs);
4573 VARRAY_GROW (reg_iv_info, nregs);
4575 recombine_givs (loop, bl, flags & LOOP_UNROLL);
4577 /* Reduce each giv that we decided to reduce. */
4579 for (v = bl->giv; v; v = v->next_iv)
4581 struct induction *tv;
4582 if (! v->ignore && v->same == 0)
4584 int auto_inc_opt = 0;
4586 /* If the code for derived givs immediately below has already
4587 allocated a new_reg, we must keep it. */
4588 if (! v->new_reg)
4589 v->new_reg = gen_reg_rtx (v->mode);
4591 if (v->derived_from)
4593 struct induction *d = v->derived_from;
4595 /* In case d->dest_reg is not replaceable, we have
4596 to replace it in v->insn now. */
4597 if (! d->new_reg)
4598 d->new_reg = gen_reg_rtx (d->mode);
4599 PATTERN (v->insn)
4600 = replace_rtx (PATTERN (v->insn), d->dest_reg, d->new_reg);
4601 PATTERN (v->insn)
4602 = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg);
4603 /* For each place where the biv is incremented, add an
4604 insn to set the new, reduced reg for the giv.
4605 We used to do this only for biv_count != 1, but
4606 this fails when there is a giv after a single biv
4607 increment, e.g. when the last giv was expressed as
4608 pre-decrement. */
4609 for (tv = bl->biv; tv; tv = tv->next_iv)
4611 /* We always emit reduced giv increments before the
4612 biv increment when bl->biv_count != 1. So by
4613 emitting the add insns for derived givs after the
4614 biv increment, they pick up the updated value of
4615 the reduced giv.
4616 If the reduced giv is processed with
4617 auto_inc_opt == 1, then it is incremented earlier
4618 than the biv, hence we'll still pick up the right
4619 value.
4620 If it's processed with auto_inc_opt == -1,
4621 that implies that the biv increment is before the
4622 first reduced giv's use. The derived giv's lifetime
4623 is after the reduced giv's lifetime, hence in this
4624 case, the biv increment doesn't matter. */
4625 emit_insn_after (copy_rtx (PATTERN (v->insn)), tv->insn);
4627 continue;
4630 #ifdef AUTO_INC_DEC
4631 /* If the target has auto-increment addressing modes, and
4632 this is an address giv, then try to put the increment
4633 immediately after its use, so that flow can create an
4634 auto-increment addressing mode. */
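	  /* Sketch: placing the increment directly after the use turns
	     e.g. the pair  ... *p ...; p += 4;  into a single
	     post-increment address, as for  *p++  on targets with
	     POST_INC addressing.  */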
4635 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4636 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4637 /* We don't handle reversed biv's because bl->biv->insn
4638 does not have a valid INSN_LUID. */
4639 && ! bl->reversed
4640 && v->always_executed && ! v->maybe_multiple
4641 && INSN_UID (v->insn) < max_uid_for_loop)
4643 /* If other giv's have been combined with this one, then
4644 this will work only if all uses of the other giv's occur
4645 before this giv's insn. This is difficult to check.
4647 We simplify this by looking for the common case where
4648 there is one DEST_REG giv, and this giv's insn is the
4649 last use of the dest_reg of that DEST_REG giv. If the
4650 increment occurs after the address giv, then we can
4651 perform the optimization. (Otherwise, the increment
4652 would have to go before other_giv, and we would not be
4653 able to combine it with the address giv to get an
4654 auto-inc address.) */
4655 if (v->combined_with)
4657 struct induction *other_giv = 0;
4659 for (tv = bl->giv; tv; tv = tv->next_iv)
4660 if (tv->same == v)
4662 if (other_giv)
4663 break;
4664 else
4665 other_giv = tv;
4667 if (! tv && other_giv
4668 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4669 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4670 == INSN_UID (v->insn))
4671 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4672 auto_inc_opt = 1;
4674 /* Check for case where increment is before the address
4675 giv. Do this test in "loop order". */
4676 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4677 && (INSN_LUID (v->insn) < INSN_LUID (loop_scan_start)
4678 || (INSN_LUID (bl->biv->insn)
4679 > INSN_LUID (loop_scan_start))))
4680 || (INSN_LUID (v->insn) < INSN_LUID (loop_scan_start)
4681 && (INSN_LUID (loop_scan_start)
4682 < INSN_LUID (bl->biv->insn))))
4683 auto_inc_opt = -1;
4684 else
4685 auto_inc_opt = 1;
4687 #ifdef HAVE_cc0
4689 rtx prev;
4691 /* We can't put an insn immediately after one setting
4692 cc0, or immediately before one using cc0. */
4693 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4694 || (auto_inc_opt == -1
4695 && (prev = prev_nonnote_insn (v->insn)) != 0
4696 && INSN_P (prev)
4697 && sets_cc0_p (PATTERN (prev))))
4698 auto_inc_opt = 0;
4700 #endif
4702 if (auto_inc_opt)
4703 v->auto_inc_opt = 1;
4705 #endif
4707 /* For each place where the biv is incremented, add an insn
4708 to increment the new, reduced reg for the giv. */
4709 for (tv = bl->biv; tv; tv = tv->next_iv)
4711 rtx insert_before;
4713 if (! auto_inc_opt)
4714 insert_before = tv->insn;
4715 else if (auto_inc_opt == 1)
4716 insert_before = NEXT_INSN (v->insn);
4717 else
4718 insert_before = v->insn;
4720 if (tv->mult_val == const1_rtx)
4721 emit_iv_add_mult (tv->add_val, v->mult_val,
4722 v->new_reg, v->new_reg, insert_before);
4723 else /* tv->mult_val == const0_rtx */
4724 /* A multiply is acceptable here
4725 since this is presumed to be seldom executed. */
4726 emit_iv_add_mult (tv->add_val, v->mult_val,
4727 v->add_val, v->new_reg, insert_before);
4730 /* Add code at loop start to initialize giv's reduced reg. */
4732 emit_iv_add_mult (bl->initial_value, v->mult_val,
4733 v->add_val, v->new_reg, loop_start);
4737 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4738 as not reduced.
4740 For each giv register that can be reduced now: if replaceable,
4741 substitute reduced reg wherever the old giv occurs;
4742 else add new move insn "giv_reg = reduced_reg". */
4744 for (v = bl->giv; v; v = v->next_iv)
4746 if (v->same && v->same->ignore)
4747 v->ignore = 1;
4749 if (v->ignore)
4750 continue;
4752 /* Update expression if this was combined, in case other giv was
4753 replaced. */
4754 if (v->same)
4755 v->new_reg = replace_rtx (v->new_reg,
4756 v->same->dest_reg, v->same->new_reg);
4758 if (v->giv_type == DEST_ADDR)
4759 /* Store reduced reg as the address in the memref where we found
4760 this giv. */
4761 validate_change (v->insn, v->location, v->new_reg, 0);
4762 else if (v->replaceable)
4764 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4766 #if 0
4767 /* I can no longer duplicate the original problem. Perhaps
4768 this is unnecessary now? */
4770 /* Replaceable; it isn't strictly necessary to delete the old
4771 insn and emit a new one, because v->dest_reg is now dead.
4773 However, especially when unrolling loops, the special
4774 handling for (set REG0 REG1) in the second cse pass may
4775 make v->dest_reg live again. To avoid this problem, emit
4776 an insn to set the original giv reg from the reduced giv.
4777 We can not delete the original insn, since it may be part
4778 of a LIBCALL, and the code in flow that eliminates dead
4779 libcalls will fail if it is deleted. */
4780 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4781 v->insn);
4782 #endif
4784 else
4786 /* Not replaceable; emit an insn to set the original giv reg from
4787 the reduced giv, same as above. */
4788 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4789 v->insn);
4792 /* When a loop is reversed, givs which depend on the reversed
4793 biv, and which are live outside the loop, must be set to their
4794 correct final value. This insn is only needed if the giv is
4795 not replaceable. The correct final value is the same as the
4796 value that the giv starts the reversed loop with. */
4797 if (bl->reversed && ! v->replaceable)
4798 emit_iv_add_mult (bl->initial_value, v->mult_val,
4799 v->add_val, v->dest_reg, end_insert_before);
4800 else if (v->final_value)
4802 rtx insert_before;
4804 /* If the loop has multiple exits, emit the insn before the
4805 loop to ensure that it will always be executed no matter
4806 how the loop exits. Otherwise, emit the insn after the loop,
4807 since this is slightly more efficient. */
4808 if (loop->exit_count)
4809 insert_before = loop_start;
4810 else
4811 insert_before = end_insert_before;
4812 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4813 insert_before);
4815 #if 0
4816 /* If the insn to set the final value of the giv was emitted
4817 before the loop, then we must delete the insn inside the loop
4818 that sets it. If this is a LIBCALL, then we must delete
4819 every insn in the libcall. Note, however, that
4820 final_giv_value will only succeed when there are multiple
4821 exits if the giv is dead at each exit, hence it does not
4822 matter that the original insn remains because it is dead
4823 anyways. */
4824 /* Delete the insn inside the loop that sets the giv since
4825 the giv is now set before (or after) the loop. */
4826 delete_insn (v->insn);
4827 #endif
4830 if (loop_dump_stream)
4832 fprintf (loop_dump_stream, "giv at %d reduced to ",
4833 INSN_UID (v->insn));
4834 print_rtl (loop_dump_stream, v->new_reg);
4835 fprintf (loop_dump_stream, "\n");
4839 /* All the givs based on the biv bl have been reduced if they
4840 merit it. */
4842 /* For each giv not marked as maybe dead that has been combined with a
4843 second giv, clear any "maybe dead" mark on that second giv.
4844 v->new_reg will either be or refer to the register of the giv it
4845 combined with.
4847 Doing this clearing avoids problems in biv elimination where a
4848 giv's new_reg is a complex value that can't be put in the insn but
4849 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4850 Since the register will be used in either case, we'd prefer it be
4851 used from the simpler giv. */
4853 for (v = bl->giv; v; v = v->next_iv)
4854 if (! v->maybe_dead && v->same)
4855 v->same->maybe_dead = 0;
4857 /* Try to eliminate the biv, if it is a candidate.
4858 This won't work if ! all_reduced,
4859 since the givs we planned to use might not have been reduced.
4861 We have to be careful that we didn't initially think we could eliminate
4862 this biv because of a giv that we now think may be dead and shouldn't
4863 be used as a biv replacement.
4865 Also, there is the possibility that we may have a giv that looks
4866 like it can be used to eliminate a biv, but the resulting insn
4867 isn't valid. This can happen, for example, on the 88k, where a
4868 JUMP_INSN can compare a register only with zero. Attempts to
4869 replace it with a compare with a constant will fail.
4871 Note that in cases where this call fails, we may have replaced some
4872 of the occurrences of the biv with a giv, but no harm was done in
4873 doing so in the rare cases where it can occur. */
4875 if (all_reduced == 1 && bl->eliminable
4876 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4878 /* ?? If we created a new test to bypass the loop entirely,
4879 or otherwise drop straight in, based on this test, then
4880 we might want to rewrite it also. This way some later
4881 pass has more hope of removing the initialization of this
4882 biv entirely. */
4884 /* If final_value != 0, then the biv may be used after loop end
4885 and we must emit an insn to set it just in case.
4887 Reversed bivs already have an insn after the loop setting their
4888 value, so we don't need another one. We can't calculate the
4889 proper final value for such a biv here anyway.  */
4890 if (final_value != 0 && ! bl->reversed)
4892 rtx insert_before;
4894 /* If the loop has multiple exits, emit the insn before the
4895 loop to ensure that it will always be executed no matter
4896 how the loop exits. Otherwise, emit the insn after the
4897 loop, since this is slightly more efficient. */
4898 if (loop->exit_count)
4899 insert_before = loop_start;
4900 else
4901 insert_before = end_insert_before;
4903 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4904 end_insert_before);
4907 #if 0
4908 /* Delete all of the instructions inside the loop which set
4909 the biv, as they are all dead.  It is safe to delete them,
4910 because an insn setting a biv will never be part of a libcall. */
4911 /* However, deleting them will invalidate the regno_last_uid info,
4912 so keeping them around is more convenient. Final_biv_value
4913 will only succeed when there are multiple exits if the biv
4914 is dead at each exit, hence it does not matter that the original
4915 insn remains, because it is dead anyway.  */
4916 for (v = bl->biv; v; v = v->next_iv)
4917 delete_insn (v->insn);
4918 #endif
4920 if (loop_dump_stream)
4921 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4922 bl->regno);
4926 /* Go through all the instructions in the loop, making all the
4927 register substitutions scheduled in REG_MAP. */
4929 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
4930 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4931 || GET_CODE (p) == CALL_INSN)
4933 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4934 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4935 INSN_CODE (p) = -1;
4938 if (loop_info->n_iterations > 0)
4940 /* When we completely unroll a loop we will likely not need the increment
4941 of the loop BIV and we will not need the conditional branch at the
4942 end of the loop. */
4943 unrolled_insn_copies = insn_count - 2;
4945 #ifdef HAVE_cc0
4946 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4947 need the comparison before the conditional branch at the end of the
4948 loop. */
4949 unrolled_insn_copies -= 1;
4950 #endif
4952 /* We'll need one copy for each loop iteration. */
4953 unrolled_insn_copies *= loop_info->n_iterations;
4955 /* A little slop to account for the ability to remove initialization
4956 code, better CSE, and other secondary benefits of completely
4957 unrolling some loops. */
4958 unrolled_insn_copies -= 1;
4960 /* Clamp the value. */
4961 if (unrolled_insn_copies < 0)
4962 unrolled_insn_copies = 0;
4965 /* Unroll loops from within strength reduction so that we can use the
4966 induction variable information that strength_reduce has already
4967 collected. Always unroll loops that would be as small or smaller
4968 unrolled than when rolled. */
4969 if ((flags & LOOP_UNROLL)
4970 || (loop_info->n_iterations > 0
4971 && unrolled_insn_copies <= insn_count))
4972 unroll_loop (loop, insn_count, end_insert_before, 1);
4974 #ifdef HAVE_doloop_end
4975 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4976 doloop_optimize (loop);
4977 #endif /* HAVE_doloop_end */
4979 if (loop_dump_stream)
4980 fprintf (loop_dump_stream, "\n");
4982 egress:
4983 VARRAY_FREE (reg_iv_type);
4984 VARRAY_FREE (reg_iv_info);
4985 free (reg_biv_class);
4986 if (reg_map)
4987 free (reg_map);
4990 /* Record all basic induction variables calculated in the insn.  */
4991 static rtx
4992 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4993 struct loop *loop;
4994 rtx p;
4995 int not_every_iteration;
4996 int maybe_multiple;
4998 rtx set;
4999 rtx dest_reg;
5000 rtx inc_val;
5001 rtx mult_val;
5002 rtx *location;
5004 if (GET_CODE (p) == INSN
5005 && (set = single_set (p))
5006 && GET_CODE (SET_DEST (set)) == REG)
5008 dest_reg = SET_DEST (set);
5009 if (REGNO (dest_reg) < max_reg_before_loop
5010 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5011 && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5013 if (basic_induction_var (loop, SET_SRC (set),
5014 GET_MODE (SET_SRC (set)),
5015 dest_reg, p, &inc_val, &mult_val,
5016 &location))
5018 /* It is a possible basic induction variable.
5019 Create and initialize an induction structure for it. */
5021 struct induction *v
5022 = (struct induction *) oballoc (sizeof (struct induction));
5024 record_biv (v, p, dest_reg, inc_val, mult_val, location,
5025 not_every_iteration, maybe_multiple);
5026 REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT;
5028 else if (REGNO (dest_reg) < max_reg_before_loop)
5029 REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5032 return p;
5035 /* Record all givs calculated in the insn.
5036 A register is a giv if: it is only set once, it is a function of a
5037 biv and a constant (or invariant), and it is not a biv. */
5038 static rtx
5039 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5040 struct loop *loop;
5041 rtx p;
5042 int not_every_iteration;
5043 int maybe_multiple;
5045 rtx set;
5046 /* Look for a general induction variable in a register. */
5047 if (GET_CODE (p) == INSN
5048 && (set = single_set (p))
5049 && GET_CODE (SET_DEST (set)) == REG
5050 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
5052 rtx src_reg;
5053 rtx dest_reg;
5054 rtx add_val;
5055 rtx mult_val;
5056 int benefit;
5057 rtx regnote = 0;
5058 rtx last_consec_insn;
5060 dest_reg = SET_DEST (set);
5061 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5062 return p;
5064 if (/* SET_SRC is a giv. */
5065 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5066 &mult_val, 0, &benefit, VOIDmode)
5067 /* Equivalent expression is a giv. */
5068 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5069 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5070 &add_val, &mult_val, 0,
5071 &benefit, VOIDmode)))
5072 /* Don't try to handle any regs made by loop optimization.
5073 We have nothing on them in regno_first_uid, etc. */
5074 && REGNO (dest_reg) < max_reg_before_loop
5075 /* Don't recognize a BASIC_INDUCT_VAR here. */
5076 && dest_reg != src_reg
5077 /* This must be the only place where the register is set. */
5078 && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
5079 /* or all sets must be consecutive and make a giv. */
5080 || (benefit = consec_sets_giv (loop, benefit, p,
5081 src_reg, dest_reg,
5082 &add_val, &mult_val,
5083 &last_consec_insn))))
5085 struct induction *v
5086 = (struct induction *) oballoc (sizeof (struct induction));
5088 /* If this is a library call, increase benefit. */
5089 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5090 benefit += libcall_benefit (p);
5092 /* Skip the consecutive insns, if there are any. */
5093 if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
5094 p = last_consec_insn;
5096 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5097 benefit, DEST_REG, not_every_iteration,
5098 maybe_multiple, NULL_PTR);
5103 #ifndef DONT_REDUCE_ADDR
5104 /* Look for givs which are memory addresses. */
5105 /* This resulted in worse code on a VAX 8600. I wonder if it
5106 still does. */
5107 if (GET_CODE (p) == INSN)
5108 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5109 maybe_multiple);
5110 #endif
5112 /* Update the status of whether giv can derive other givs. This can
5113 change when we pass a label or an insn that updates a biv. */
5114 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5115 || GET_CODE (p) == CODE_LABEL)
5116 update_giv_derive (loop, p);
5117 return p;
5120 /* Return 1 if X is a valid source for an initial value (or as value being
5121 compared against in an initial test).
5123 X must be either a register or constant and must not be clobbered between
5124 the current insn and the start of the loop.
5126 INSN is the insn containing X. */
5128 static int
5129 valid_initial_value_p (x, insn, call_seen, loop_start)
5130 rtx x;
5131 rtx insn;
5132 int call_seen;
5133 rtx loop_start;
5135 if (CONSTANT_P (x))
5136 return 1;
5138 /* Only consider pseudos we know about, initialized in insns whose luids
5139 we know. */
5140 if (GET_CODE (x) != REG
5141 || REGNO (x) >= max_reg_before_loop)
5142 return 0;
5144 /* Don't use call-clobbered registers across a call which clobbers it. On
5145 some machines, don't use any hard registers at all. */
5146 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5147 && (SMALL_REGISTER_CLASSES
5148 || (call_used_regs[REGNO (x)] && call_seen)))
5149 return 0;
5151 /* Don't use registers that have been clobbered before the start of the
5152 loop. */
5153 if (reg_set_between_p (x, insn, loop_start))
5154 return 0;
5156 return 1;
5159 /* Scan X for memory refs and check each memory address
5160 as a possible giv. INSN is the insn whose pattern X comes from.
5161 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5162 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5163 more than once in each loop iteration.  */
5165 static void
5166 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5167 const struct loop *loop;
5168 rtx x;
5169 rtx insn;
5170 int not_every_iteration, maybe_multiple;
5172 register int i, j;
5173 register enum rtx_code code;
5174 register const char *fmt;
5176 if (x == 0)
5177 return;
5179 code = GET_CODE (x);
5180 switch (code)
5182 case REG:
5183 case CONST_INT:
5184 case CONST:
5185 case CONST_DOUBLE:
5186 case SYMBOL_REF:
5187 case LABEL_REF:
5188 case PC:
5189 case CC0:
5190 case ADDR_VEC:
5191 case ADDR_DIFF_VEC:
5192 case USE:
5193 case CLOBBER:
5194 return;
5196 case MEM:
5198 rtx src_reg;
5199 rtx add_val;
5200 rtx mult_val;
5201 int benefit;
5203 /* This code used to disable creating GIVs with mult_val == 1 and
5204 add_val == 0. However, this leads to lost optimizations when
5205 it comes time to combine a set of related DEST_ADDR GIVs, since
5206 this one would not be seen. */
5208 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5209 &mult_val, 1, &benefit, GET_MODE (x)))
5211 /* Found one; record it. */
5212 struct induction *v
5213 = (struct induction *) oballoc (sizeof (struct induction));
5215 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5216 add_val, benefit, DEST_ADDR, not_every_iteration,
5217 maybe_multiple, &XEXP (x, 0));
5219 v->mem_mode = GET_MODE (x);
5222 return;
5224 default:
5225 break;
5228 /* Recursively scan the subexpressions for other mem refs. */
5230 fmt = GET_RTX_FORMAT (code);
5231 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5232 if (fmt[i] == 'e')
5233 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5234 maybe_multiple);
5235 else if (fmt[i] == 'E')
5236 for (j = 0; j < XVECLEN (x, i); j++)
5237 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5238 maybe_multiple);
5241 /* Fill in the data about one biv update.
5242 V is the `struct induction' in which we record the biv. (It is
5243 allocated by the caller, with oballoc.)
5244 INSN is the insn that sets it.
5245 DEST_REG is the biv's reg.
5247 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5248 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5249 being set to INC_VAL.
5251 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5252 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5253 can be executed more than once per iteration. If MAYBE_MULTIPLE
5254 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5255 executed exactly once per iteration. */
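/* Sketch of the two encodings: for the update  i = i + 4  we record
   MULT_VAL = const1_rtx and INC_VAL = 4; for a set  i = x  inside the
   loop we record MULT_VAL = const0_rtx and INC_VAL = x.  */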
5257 static void
5258 record_biv (v, insn, dest_reg, inc_val, mult_val, location,
5259 not_every_iteration, maybe_multiple)
5260 struct induction *v;
5261 rtx insn;
5262 rtx dest_reg;
5263 rtx inc_val;
5264 rtx mult_val;
5265 rtx *location;
5266 int not_every_iteration;
5267 int maybe_multiple;
5269 struct iv_class *bl;
5271 v->insn = insn;
5272 v->src_reg = dest_reg;
5273 v->dest_reg = dest_reg;
5274 v->mult_val = mult_val;
5275 v->add_val = inc_val;
5276 v->location = location;
5277 v->mode = GET_MODE (dest_reg);
5278 v->always_computable = ! not_every_iteration;
5279 v->always_executed = ! not_every_iteration;
5280 v->maybe_multiple = maybe_multiple;
5282 /* Add this to the reg's iv_class, creating a class
5283 if this is the first incrementation of the reg. */
5285 bl = reg_biv_class[REGNO (dest_reg)];
5286 if (bl == 0)
5288 /* Create and initialize new iv_class. */
5290 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
5292 bl->regno = REGNO (dest_reg);
5293 bl->biv = 0;
5294 bl->giv = 0;
5295 bl->biv_count = 0;
5296 bl->giv_count = 0;
5298 /* Set initial value to the reg itself. */
5299 bl->initial_value = dest_reg;
5300 /* We haven't seen the initializing insn yet.  */
5301 bl->init_insn = 0;
5302 bl->init_set = 0;
5303 bl->initial_test = 0;
5304 bl->incremented = 0;
5305 bl->eliminable = 0;
5306 bl->nonneg = 0;
5307 bl->reversed = 0;
5308 bl->total_benefit = 0;
5310 /* Add this class to loop_iv_list. */
5311 bl->next = loop_iv_list;
5312 loop_iv_list = bl;
5314 /* Put it in the array of biv register classes. */
5315 reg_biv_class[REGNO (dest_reg)] = bl;
5318 /* Update IV_CLASS entry for this biv. */
5319 v->next_iv = bl->biv;
5320 bl->biv = v;
5321 bl->biv_count++;
5322 if (mult_val == const1_rtx)
5323 bl->incremented = 1;
5325 if (loop_dump_stream)
5327 fprintf (loop_dump_stream,
5328 "Insn %d: possible biv, reg %d,",
5329 INSN_UID (insn), REGNO (dest_reg));
5330 if (GET_CODE (inc_val) == CONST_INT)
5332 fprintf (loop_dump_stream, " const =");
5333 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
5334 fputc ('\n', loop_dump_stream);
5336 else
5338 fprintf (loop_dump_stream, " const = ");
5339 print_rtl (loop_dump_stream, inc_val);
5340 fprintf (loop_dump_stream, "\n");
5345 /* Fill in the data about one giv.
5346 V is the `struct induction' in which we record the giv. (It is
5347 allocated by the caller, with oballoc.)
5348 INSN is the insn that sets it.
5349 BENEFIT estimates the savings from deleting this insn.
5350 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5351 into a register or is used as a memory address.
5353 SRC_REG is the biv reg which the giv is computed from.
5354 DEST_REG is the giv's reg (if the giv is stored in a reg).
5355 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5356 LOCATION points to the place where this giv's value appears in INSN. */
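/* An editorial example (names invented): in a loop over `int a[n]' with
   biv `i', the address computation `a + i*4' is a giv with SRC_REG = i,
   MULT_VAL = (const_int 4) and ADD_VAL = `a'.  It is a DEST_ADDR giv if
   it only ever feeds a memory address, and a DEST_REG giv if some insn
   computes it into a register of its own. */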
5358 static void
5359 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
5360 type, not_every_iteration, maybe_multiple, location)
5361 const struct loop *loop;
5362 struct induction *v;
5363 rtx insn;
5364 rtx src_reg;
5365 rtx dest_reg;
5366 rtx mult_val, add_val;
5367 int benefit;
5368 enum g_types type;
5369 int not_every_iteration, maybe_multiple;
5370 rtx *location;
5372 struct induction *b;
5373 struct iv_class *bl;
5374 rtx set = single_set (insn);
5375 rtx temp;
5377 /* Attempt to prove constancy of the values. */
5378 temp = simplify_rtx (add_val);
5379 if (temp)
5380 add_val = temp;
5382 v->insn = insn;
5383 v->src_reg = src_reg;
5384 v->giv_type = type;
5385 v->dest_reg = dest_reg;
5386 v->mult_val = mult_val;
5387 v->add_val = add_val;
5388 v->benefit = benefit;
5389 v->location = location;
5390 v->cant_derive = 0;
5391 v->combined_with = 0;
5392 v->maybe_multiple = maybe_multiple;
5393 v->maybe_dead = 0;
5394 v->derive_adjustment = 0;
5395 v->same = 0;
5396 v->ignore = 0;
5397 v->new_reg = 0;
5398 v->final_value = 0;
5399 v->same_insn = 0;
5400 v->auto_inc_opt = 0;
5401 v->unrolled = 0;
5402 v->shared = 0;
5403 v->derived_from = 0;
5404 v->last_use = 0;
5406 /* The v->always_computable field is used in update_giv_derive, to
5407 determine whether a giv can be used to derive another giv. For a
5408 DEST_REG giv, INSN computes a new value for the giv, so its value
5409 isn't computable if INSN isn't executed every iteration.
5410 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5411 it does not compute a new value. Hence the value is always computable
5412 regardless of whether INSN is executed each iteration. */
5414 if (type == DEST_ADDR)
5415 v->always_computable = 1;
5416 else
5417 v->always_computable = ! not_every_iteration;
5419 v->always_executed = ! not_every_iteration;
5421 if (type == DEST_ADDR)
5423 v->mode = GET_MODE (*location);
5424 v->lifetime = 1;
5426 else /* type == DEST_REG */
5428 v->mode = GET_MODE (SET_DEST (set));
5430 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
5431 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
5433 /* If the lifetime is zero, it means that this register is
5434 really a dead store. So mark this as a giv that can be
5435 ignored. This will not prevent the biv from being eliminated. */
5436 if (v->lifetime == 0)
5437 v->ignore = 1;
5439 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
5440 REG_IV_INFO (REGNO (dest_reg)) = v;
5443 /* Add the giv to the class of givs computed from one biv. */
5445 bl = reg_biv_class[REGNO (src_reg)];
5446 if (bl)
5448 v->next_iv = bl->giv;
5449 bl->giv = v;
5450 /* Don't count DEST_ADDR. This is supposed to count the number of
5451 insns that calculate givs. */
5452 if (type == DEST_REG)
5453 bl->giv_count++;
5454 bl->total_benefit += benefit;
5456 else
5457 /* Fatal error, biv missing for this giv? */
5458 abort ();
5460 if (type == DEST_ADDR)
5461 v->replaceable = 1;
5462 else
5464 /* The giv can be replaced outright by the reduced register only if all
5465 of the following conditions are true:
5466 - the insn that sets the giv is always executed on any iteration
5467 on which the giv is used at all
5468 (there are two ways to deduce this:
5469 either the insn is executed on every iteration,
5470 or all uses follow that insn in the same basic block),
5471 - the giv is not used outside the loop
5472 - no assignments to the biv occur during the giv's lifetime. */
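/* Editorial sketch of a failing case: in `g = i*4; ... i++; ... use g;'
   the biv `i' is updated inside g's lifetime, so the reduced register
   tracking i*4 would be ahead of g at the use; g must keep its own
   register. */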
5474 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5475 /* Previous line always fails if INSN was moved by loop opt. */
5476 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
5477 < INSN_LUID (loop->end)
5478 && (! not_every_iteration
5479 || last_use_this_basic_block (dest_reg, insn)))
5481 /* Now check that there are no assignments to the biv within the
5482 giv's lifetime. This requires two separate checks. */
5484 /* Check each biv update, and fail if any are between the first
5485 and last use of the giv.
5487 If this loop contains an inner loop that was unrolled, then
5488 the insn modifying the biv may have been emitted by the loop
5489 unrolling code, and hence does not have a valid luid. Just
5490 mark the biv as not replaceable in this case. It is not very
5491 useful as a biv, because it is used in two different loops.
5492 It is very unlikely that we would be able to optimize the giv
5493 using this biv anyway. */
5495 v->replaceable = 1;
5496 for (b = bl->biv; b; b = b->next_iv)
5498 if (INSN_UID (b->insn) >= max_uid_for_loop
5499 || ((uid_luid[INSN_UID (b->insn)]
5500 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
5501 && (uid_luid[INSN_UID (b->insn)]
5502 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
5504 v->replaceable = 0;
5505 v->not_replaceable = 1;
5506 break;
5510 /* If there are any backwards branches that go from after the
5511 biv update to before it, then this giv is not replaceable. */
5512 if (v->replaceable)
5513 for (b = bl->biv; b; b = b->next_iv)
5514 if (back_branch_in_range_p (loop, b->insn))
5516 v->replaceable = 0;
5517 v->not_replaceable = 1;
5518 break;
5521 else
5523 /* May still be replaceable, we don't have enough info here to
5524 decide. */
5525 v->replaceable = 0;
5526 v->not_replaceable = 0;
5530 /* Record whether the add_val contains a const_int, for later use by
5531 combine_givs. */
5533 rtx tem = add_val;
5535 v->no_const_addval = 1;
5536 if (tem == const0_rtx)
5537 ;
5538 else if (CONSTANT_P (add_val))
5539 v->no_const_addval = 0;
5540 if (GET_CODE (tem) == PLUS)
5542 while (1)
5544 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5545 tem = XEXP (tem, 0);
5546 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5547 tem = XEXP (tem, 1);
5548 else
5549 break;
5551 if (CONSTANT_P (XEXP (tem, 1)))
5552 v->no_const_addval = 0;
5556 if (loop_dump_stream)
5558 if (type == DEST_REG)
5559 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5560 INSN_UID (insn), REGNO (dest_reg));
5561 else
5562 fprintf (loop_dump_stream, "Insn %d: dest address",
5563 INSN_UID (insn));
5565 fprintf (loop_dump_stream, " src reg %d benefit %d",
5566 REGNO (src_reg), v->benefit);
5567 fprintf (loop_dump_stream, " lifetime %d",
5568 v->lifetime);
5570 if (v->replaceable)
5571 fprintf (loop_dump_stream, " replaceable");
5573 if (v->no_const_addval)
5574 fprintf (loop_dump_stream, " ncav");
5576 if (GET_CODE (mult_val) == CONST_INT)
5578 fprintf (loop_dump_stream, " mult ");
5579 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5581 else
5583 fprintf (loop_dump_stream, " mult ");
5584 print_rtl (loop_dump_stream, mult_val);
5587 if (GET_CODE (add_val) == CONST_INT)
5589 fprintf (loop_dump_stream, " add ");
5590 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5592 else
5594 fprintf (loop_dump_stream, " add ");
5595 print_rtl (loop_dump_stream, add_val);
5599 if (loop_dump_stream)
5600 fprintf (loop_dump_stream, "\n");
5604 /* All this does is determine whether a giv can be made replaceable because
5605 its final value can be calculated. This code can not be part of record_giv
5606 above, because final_giv_value requires that the number of loop iterations
5607 be known, and that can not be accurately calculated until after all givs
5608 have been identified. */
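/* For instance (an invented example): given a giv `g = i*4' in a loop
   known to run exactly 10 times from i == 0, final_giv_value can compute
   g's value on exit (36), which can make the giv replaceable even though
   it is used after the loop. */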
5610 static void
5611 check_final_value (loop, v)
5612 const struct loop *loop;
5613 struct induction *v;
5615 struct iv_class *bl;
5616 rtx final_value = 0;
5618 bl = reg_biv_class[REGNO (v->src_reg)];
5620 /* DEST_ADDR givs will never reach here, because they are always marked
5621 replaceable above in record_giv. */
5623 /* The giv can be replaced outright by the reduced register only if all
5624 of the following conditions are true:
5625 - the insn that sets the giv is always executed on any iteration
5626 on which the giv is used at all
5627 (there are two ways to deduce this:
5628 either the insn is executed on every iteration,
5629 or all uses follow that insn in the same basic block),
5630 - its final value can be calculated (this condition is different
5631 than the one above in record_giv)
5632 - no assignments to the biv occur during the giv's lifetime. */
5634 #if 0
5635 /* This is only called now when replaceable is known to be false. */
5636 /* Clear replaceable, so that it won't confuse final_giv_value. */
5637 v->replaceable = 0;
5638 #endif
5640 if ((final_value = final_giv_value (loop, v))
5641 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5643 int biv_increment_seen = 0;
5644 rtx p = v->insn;
5645 rtx last_giv_use;
5647 v->replaceable = 1;
5649 /* When trying to determine whether or not a biv increment occurs
5650 during the lifetime of the giv, we can ignore uses of the variable
5651 outside the loop because final_value is true. Hence we can not
5652 use regno_last_uid and regno_first_uid as above in record_giv. */
5654 /* Search the loop to determine whether any assignments to the
5655 biv occur during the giv's lifetime. Start with the insn
5656 that sets the giv, and search around the loop until we come
5657 back to that insn again.
5659 Also fail if there is a jump within the giv's lifetime that jumps
5660 to somewhere outside the lifetime but still within the loop. This
5661 catches spaghetti code where the execution order is not linear, and
5662 hence the above test fails. Here we assume that the giv lifetime
5663 does not extend from one iteration of the loop to the next, so as
5664 to make the test easier. Since the lifetime isn't known yet,
5665 this requires two loops. See also record_giv above. */
5667 last_giv_use = v->insn;
5669 while (1)
5671 p = NEXT_INSN (p);
5672 if (p == loop->end)
5673 p = NEXT_INSN (loop->start);
5674 if (p == v->insn)
5675 break;
5677 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5678 || GET_CODE (p) == CALL_INSN)
5680 /* It is possible for the BIV increment to use the GIV if we
5681 have a cycle. Thus we must be sure to check each insn for
5682 both BIV and GIV uses, and we must check for BIV uses
5683 first. */
5685 if (! biv_increment_seen
5686 && reg_set_p (v->src_reg, PATTERN (p)))
5687 biv_increment_seen = 1;
5689 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5691 if (biv_increment_seen)
5693 v->replaceable = 0;
5694 v->not_replaceable = 1;
5695 break;
5697 last_giv_use = p;
5702 /* Now that the lifetime of the giv is known, check for branches
5703 from within the lifetime to outside the lifetime if it is still
5704 replaceable. */
5706 if (v->replaceable)
5708 p = v->insn;
5709 while (1)
5711 p = NEXT_INSN (p);
5712 if (p == loop->end)
5713 p = NEXT_INSN (loop->start);
5714 if (p == last_giv_use)
5715 break;
5717 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5718 && LABEL_NAME (JUMP_LABEL (p))
5719 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5720 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5721 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5722 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5724 v->replaceable = 0;
5725 v->not_replaceable = 1;
5727 if (loop_dump_stream)
5728 fprintf (loop_dump_stream,
5729 "Found branch outside giv lifetime.\n");
5731 break;
5736 /* If it is replaceable, then save the final value. */
5737 if (v->replaceable)
5738 v->final_value = final_value;
5741 if (loop_dump_stream && v->replaceable)
5742 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5743 INSN_UID (v->insn), REGNO (v->dest_reg));
5746 /* Update the status of whether a giv can derive other givs.
5748 We need to do something special if there is or may be an update to the biv
5749 between the time the giv is defined and the time it is used to derive
5750 another giv.
5752 In addition, a giv that is only conditionally set is not allowed to
5753 derive another giv once a label has been passed.
5755 The cases we look at are when a label or an update to a biv is passed. */
5757 static void
5758 update_giv_derive (loop, p)
5759 const struct loop *loop;
5760 rtx p;
5762 struct iv_class *bl;
5763 struct induction *biv, *giv;
5764 rtx tem;
5765 int dummy;
5767 /* Search all IV classes, then all bivs, and finally all givs.
5769 There are three cases we are concerned with. First we have the situation
5770 of a giv that is only updated conditionally. In that case, it may not
5771 derive any givs after a label is passed.
5773 The second case is when a biv update occurs, or may occur, after the
5774 definition of a giv. For certain biv updates (see below) that are
5775 known to occur between the giv definition and use, we can adjust the
5776 giv definition. For others, or when the biv update is conditional,
5777 we must prevent the giv from deriving any other givs. There are two
5778 sub-cases within this case.
5780 If this is a label, we are concerned with any biv update that is done
5781 conditionally, since it may be done after the giv is defined followed by
5782 a branch here (actually, we need to pass both a jump and a label, but
5783 this extra tracking doesn't seem worth it).
5785 If this is a jump, we are concerned about any biv update that may be
5786 executed multiple times. We are actually only concerned about
5787 backward jumps, but it is probably not worth performing the test
5788 on the jump again here.
5790 If this is a biv update, we must adjust the giv status to show that a
5791 subsequent biv update was performed. If this adjustment cannot be done,
5792 the giv cannot derive further givs. */
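/* A worked compensation (numbers invented): suppose `g = i*3 + 7' is
   recorded and the biv update `i = i + 2' then executes.  In terms of
   the updated i, g equals i*3 + 7 - 6; the product biv->add_val *
   giv->mult_val (here 2*3) is what accumulates in derive_adjustment. */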
5794 for (bl = loop_iv_list; bl; bl = bl->next)
5795 for (biv = bl->biv; biv; biv = biv->next_iv)
5796 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5797 || biv->insn == p)
5799 for (giv = bl->giv; giv; giv = giv->next_iv)
5801 /* If cant_derive is already true, there is no point in
5802 checking all of these conditions again. */
5803 if (giv->cant_derive)
5804 continue;
5806 /* If this giv is conditionally set and we have passed a label,
5807 it cannot derive anything. */
5808 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5809 giv->cant_derive = 1;
5811 /* Skip givs that have mult_val == 0, since
5812 they are really invariants. Also skip those that are
5813 replaceable, since we know their lifetime doesn't contain
5814 any biv update. */
5815 else if (giv->mult_val == const0_rtx || giv->replaceable)
5816 continue;
5818 /* The only way we can allow this giv to derive another
5819 is if this is a biv increment and we can form the product
5820 of biv->add_val and giv->mult_val. In this case, we will
5821 be able to compute a compensation. */
5822 else if (biv->insn == p)
5824 tem = 0;
5826 if (biv->mult_val == const1_rtx)
5827 tem = simplify_giv_expr (loop,
5828 gen_rtx_MULT (giv->mode,
5829 biv->add_val,
5830 giv->mult_val),
5831 &dummy);
5833 if (tem && giv->derive_adjustment)
5834 tem = simplify_giv_expr
5835 (loop,
5836 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5837 &dummy);
5839 if (tem)
5840 giv->derive_adjustment = tem;
5841 else
5842 giv->cant_derive = 1;
5844 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5845 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5846 giv->cant_derive = 1;
5851 /* Check whether an insn is an increment legitimate for a basic induction var.
5852 X is the source of insn P, or a part of it.
5853 MODE is the mode in which X should be interpreted.
5855 DEST_REG is the putative biv, also the destination of the insn.
5856 We accept patterns of these forms:
5857 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5858 REG = INVARIANT + REG
5860 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5861 store the additive term into *INC_VAL, and store the place where
5862 we found the additive term into *LOCATION.
5864 If X is an assignment of an invariant into DEST_REG, we set
5865 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5867 We also want to detect a BIV when it corresponds to a variable
5868 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5869 of the variable may be a PLUS that adds a SUBREG of that variable to
5870 an invariant and then sign- or zero-extends the result of the PLUS
5871 into the variable.
5873 Most GIVs in such cases will be in the promoted mode, since that is
5874 probably the natural computation mode (and almost certainly the mode
5875 used for addresses) on the machine. So we view the pseudo-reg containing
5876 the variable as the BIV, as if it were simply incremented.
5878 Note that treating the entire pseudo as a BIV will result in making
5879 simple increments to any GIVs based on it. However, if the variable
5880 overflows in its declared mode but not its promoted mode, the result will
5881 be incorrect. This is acceptable if the variable is signed, since
5882 overflows in such cases are undefined, but not if it is unsigned, since
5883 those overflows are defined. So we only check for SIGN_EXTEND and
5884 not ZERO_EXTEND.
5886 If we cannot find a biv, we return 0. */
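/* Editorial RTL sketches of accepted patterns (register numbers and
   modes invented):

   (set (reg:SI 64) (plus:SI (reg:SI 64) (const_int -1)))
   => returns 1 with *INC_VAL = (const_int -1), *MULT_VAL = const1_rtx;
   (set (reg:DI 64)
   (sign_extend:DI (plus:SI (subreg:SI (reg:DI 64) 0) (const_int 1))))
   => the promoted-variable case described above. */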
5888 static int
5889 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5890 const struct loop *loop;
5891 register rtx x;
5892 enum machine_mode mode;
5893 rtx dest_reg;
5894 rtx p;
5895 rtx *inc_val;
5896 rtx *mult_val;
5897 rtx **location;
5899 register enum rtx_code code;
5900 rtx *argp, arg;
5901 rtx insn, set = 0;
5903 code = GET_CODE (x);
5904 *location = NULL;
5905 switch (code)
5907 case PLUS:
5908 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5909 || (GET_CODE (XEXP (x, 0)) == SUBREG
5910 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5911 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5913 argp = &XEXP (x, 1);
5915 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5916 || (GET_CODE (XEXP (x, 1)) == SUBREG
5917 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5918 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5920 argp = &XEXP (x, 0);
5922 else
5923 return 0;
5925 arg = *argp;
5926 if (loop_invariant_p (loop, arg) != 1)
5927 return 0;
5929 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5930 *mult_val = const1_rtx;
5931 *location = argp;
5932 return 1;
5934 case SUBREG:
5935 /* If this is a SUBREG for a promoted variable, check the inner
5936 value. */
5937 if (SUBREG_PROMOTED_VAR_P (x))
5938 return basic_induction_var (loop, SUBREG_REG (x),
5939 GET_MODE (SUBREG_REG (x)),
5940 dest_reg, p, inc_val, mult_val, location);
5941 return 0;
5943 case REG:
5944 /* If this register is assigned in a previous insn, look at its
5945 source, but don't go outside the loop or past a label. */
5947 /* If this sets a register to itself, we would repeat any previous
5948 biv increment if we applied this strategy blindly. */
5949 if (rtx_equal_p (dest_reg, x))
5950 return 0;
5952 insn = p;
5953 while (1)
5955 do
5957 insn = PREV_INSN (insn);
5959 while (insn && GET_CODE (insn) == NOTE
5960 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5962 if (!insn)
5963 break;
5964 set = single_set (insn);
5965 if (set == 0)
5966 break;
5968 if ((SET_DEST (set) == x
5969 || (GET_CODE (SET_DEST (set)) == SUBREG
5970 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5971 <= UNITS_PER_WORD)
5972 && (GET_MODE_CLASS (GET_MODE (SET_DEST (set)))
5973 == MODE_INT)
5974 && SUBREG_REG (SET_DEST (set)) == x))
5975 && basic_induction_var (loop, SET_SRC (set),
5976 (GET_MODE (SET_SRC (set)) == VOIDmode
5977 ? GET_MODE (x)
5978 : GET_MODE (SET_SRC (set))),
5979 dest_reg, insn,
5980 inc_val, mult_val, location))
5981 return 1;
5983 /* ... fall through ... */
5985 /* Can accept constant setting of biv only when inside the innermost loop.
5986 Otherwise, a biv of an inner loop may be incorrectly recognized
5987 as a biv of the outer loop,
5988 causing code to be moved INTO the inner loop. */
5989 case MEM:
5990 if (loop_invariant_p (loop, x) != 1)
5991 return 0;
5992 case CONST_INT:
5993 case SYMBOL_REF:
5994 case CONST:
5995 /* convert_modes aborts if we try to convert to or from CCmode, so just
5996 exclude that case. It is very unlikely that a condition code value
5997 would be a useful iterator anyway. */
5998 if (loop->level == 1
5999 && GET_MODE_CLASS (mode) != MODE_CC
6000 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6002 /* Possible bug here? Perhaps we don't know the mode of X. */
6003 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6004 *mult_val = const0_rtx;
6005 return 1;
6007 else
6008 return 0;
6010 case SIGN_EXTEND:
6011 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6012 dest_reg, p, inc_val, mult_val, location);
6014 case ASHIFTRT:
6015 /* Similar, since this can be a sign extension. */
6016 for (insn = PREV_INSN (p);
6017 (insn && GET_CODE (insn) == NOTE
6018 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6019 insn = PREV_INSN (insn))
6020 ;
6022 if (insn)
6023 set = single_set (insn);
6025 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6026 && set && SET_DEST (set) == XEXP (x, 0)
6027 && GET_CODE (XEXP (x, 1)) == CONST_INT
6028 && INTVAL (XEXP (x, 1)) >= 0
6029 && GET_CODE (SET_SRC (set)) == ASHIFT
6030 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6031 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6032 GET_MODE (XEXP (x, 0)),
6033 dest_reg, insn, inc_val, mult_val,
6034 location);
6035 return 0;
6037 default:
6038 return 0;
6042 /* A general induction variable (giv) is any quantity that is a linear
6043 function of a basic induction variable,
6044 i.e. giv = biv * mult_val + add_val.
6045 The coefficients can be any loop invariant quantity.
6046 A giv need not be computed directly from the biv;
6047 it can be computed by way of other givs. */
6049 /* Determine whether X computes a giv.
6050 If it does, return a nonzero value
6051 which is the benefit from eliminating the computation of X;
6052 set *SRC_REG to the register of the biv that it is computed from;
6053 set *ADD_VAL and *MULT_VAL to the coefficients,
6054 such that the value of X is biv * mult + add; */
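/* An invented example: if X is (plus (mult (reg 64) (const_int 4))
   (const_int 32)) and (reg 64) is a biv, the return is nonzero with
   *SRC_REG = (reg 64), *MULT_VAL = (const_int 4) and *ADD_VAL =
   (const_int 32), and *PBENEFIT is raised by the cost of computing X. */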
6056 static int
6057 general_induction_var (loop, x, src_reg, add_val, mult_val, is_addr,
6058 pbenefit, addr_mode)
6059 const struct loop *loop;
6060 rtx x;
6061 rtx *src_reg;
6062 rtx *add_val;
6063 rtx *mult_val;
6064 int is_addr;
6065 int *pbenefit;
6066 enum machine_mode addr_mode;
6068 rtx orig_x = x;
6069 char *storage;
6071 /* If this is an invariant, forget it, it isn't a giv. */
6072 if (loop_invariant_p (loop, x) == 1)
6073 return 0;
6075 /* See if the expression could be a giv and get its form.
6076 Mark our place on the obstack in case we don't find a giv. */
6077 storage = (char *) oballoc (0);
6078 *pbenefit = 0;
6079 x = simplify_giv_expr (loop, x, pbenefit);
6080 if (x == 0)
6082 obfree (storage);
6083 return 0;
6086 switch (GET_CODE (x))
6088 case USE:
6089 case CONST_INT:
6090 /* Since this is now an invariant and wasn't before, it must be a giv
6091 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6092 with. */
6093 *src_reg = loop_iv_list->biv->dest_reg;
6094 *mult_val = const0_rtx;
6095 *add_val = x;
6096 break;
6098 case REG:
6099 /* This is equivalent to a BIV. */
6100 *src_reg = x;
6101 *mult_val = const1_rtx;
6102 *add_val = const0_rtx;
6103 break;
6105 case PLUS:
6106 /* Either (plus (biv) (invar)) or
6107 (plus (mult (biv) (invar_1)) (invar_2)). */
6108 if (GET_CODE (XEXP (x, 0)) == MULT)
6110 *src_reg = XEXP (XEXP (x, 0), 0);
6111 *mult_val = XEXP (XEXP (x, 0), 1);
6113 else
6115 *src_reg = XEXP (x, 0);
6116 *mult_val = const1_rtx;
6118 *add_val = XEXP (x, 1);
6119 break;
6121 case MULT:
6122 /* ADD_VAL is zero. */
6123 *src_reg = XEXP (x, 0);
6124 *mult_val = XEXP (x, 1);
6125 *add_val = const0_rtx;
6126 break;
6128 default:
6129 abort ();
6132 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6133 one unless they are CONST_INT). */
6134 if (GET_CODE (*add_val) == USE)
6135 *add_val = XEXP (*add_val, 0);
6136 if (GET_CODE (*mult_val) == USE)
6137 *mult_val = XEXP (*mult_val, 0);
6139 if (is_addr)
6140 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6141 else
6142 *pbenefit += rtx_cost (orig_x, SET);
6144 /* Always return true if this is a giv so it will be detected as such,
6145 even if the benefit is zero or negative. This allows elimination
6146 of bivs that might otherwise not be eliminated. */
6147 return 1;
6150 /* Given an expression, X, try to form it as a linear function of a biv.
6151 We will canonicalize it to be of the form
6152 (plus (mult (BIV) (invar_1))
6153 (invar_2))
6154 with possible degeneracies.
6156 The invariant expressions must each be of a form that can be used as a
6157 machine operand. We surround them with a USE rtx (a hack, but localized
6158 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6159 routine; it is the caller's responsibility to strip them.
6161 If no such canonicalization is possible (i.e., two biv's are used or an
6162 expression that is neither invariant nor a biv or giv), this routine
6163 returns 0.
6165 For a non-zero return, the result will have a code of CONST_INT, USE,
6166 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6168 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
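/* Canonicalization example (invented): with biv `b',
   (mult (plus (reg b) (const_int 3)) (const_int 2))
   distributes and folds to
   (plus (mult (reg b) (const_int 2)) (const_int 6)). */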
6170 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6171 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6172 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
6173 static int cmp_recombine_givs_stats PARAMS ((const PTR, const PTR));
6175 static rtx
6176 simplify_giv_expr (loop, x, benefit)
6177 const struct loop *loop;
6178 rtx x;
6179 int *benefit;
6181 enum machine_mode mode = GET_MODE (x);
6182 rtx arg0, arg1;
6183 rtx tem;
6185 /* If this is not an integer mode, or if we cannot do arithmetic in this
6186 mode, this can't be a giv. */
6187 if (mode != VOIDmode
6188 && (GET_MODE_CLASS (mode) != MODE_INT
6189 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6190 return NULL_RTX;
6192 switch (GET_CODE (x))
6194 case PLUS:
6195 arg0 = simplify_giv_expr (loop, XEXP (x, 0), benefit);
6196 arg1 = simplify_giv_expr (loop, XEXP (x, 1), benefit);
6197 if (arg0 == 0 || arg1 == 0)
6198 return NULL_RTX;
6200 /* Put constant last, CONST_INT last if both constant. */
6201 if ((GET_CODE (arg0) == USE
6202 || GET_CODE (arg0) == CONST_INT)
6203 && ! ((GET_CODE (arg0) == USE
6204 && GET_CODE (arg1) == USE)
6205 || GET_CODE (arg1) == CONST_INT))
6206 tem = arg0, arg0 = arg1, arg1 = tem;
6208 /* Handle addition of zero, then addition of an invariant. */
6209 if (arg1 == const0_rtx)
6210 return arg0;
6211 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6212 switch (GET_CODE (arg0))
6214 case CONST_INT:
6215 case USE:
6216 /* Adding two invariants must result in an invariant, so enclose
6217 addition operation inside a USE and return it. */
6218 if (GET_CODE (arg0) == USE)
6219 arg0 = XEXP (arg0, 0);
6220 if (GET_CODE (arg1) == USE)
6221 arg1 = XEXP (arg1, 0);
6223 if (GET_CODE (arg0) == CONST_INT)
6224 tem = arg0, arg0 = arg1, arg1 = tem;
6225 if (GET_CODE (arg1) == CONST_INT)
6226 tem = sge_plus_constant (arg0, arg1);
6227 else
6228 tem = sge_plus (mode, arg0, arg1);
6230 if (GET_CODE (tem) != CONST_INT)
6231 tem = gen_rtx_USE (mode, tem);
6232 return tem;
6234 case REG:
6235 case MULT:
6236 /* biv + invar or mult + invar. Return sum. */
6237 return gen_rtx_PLUS (mode, arg0, arg1);
6239 case PLUS:
6240 /* (a + invar_1) + invar_2. Associate. */
6241 return
6242 simplify_giv_expr (loop,
6243 gen_rtx_PLUS (mode,
6244 XEXP (arg0, 0),
6245 gen_rtx_PLUS (mode,
6246 XEXP (arg0, 1),
6247 arg1)),
6248 benefit);
6250 default:
6251 abort ();
6254 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6255 MULT to reduce cases. */
6256 if (GET_CODE (arg0) == REG)
6257 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6258 if (GET_CODE (arg1) == REG)
6259 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6261 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6262 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6263 Recurse to associate the second PLUS. */
6264 if (GET_CODE (arg1) == MULT)
6265 tem = arg0, arg0 = arg1, arg1 = tem;
6267 if (GET_CODE (arg1) == PLUS)
6268 return
6269 simplify_giv_expr (loop,
6270 gen_rtx_PLUS (mode,
6271 gen_rtx_PLUS (mode, arg0,
6272 XEXP (arg1, 0)),
6273 XEXP (arg1, 1)),
6274 benefit);
6276 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6277 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6278 return NULL_RTX;
6280 if (!rtx_equal_p (arg0, arg1))
6281 return NULL_RTX;
6283 return simplify_giv_expr (loop,
6284 gen_rtx_MULT (mode,
6285 XEXP (arg0, 0),
6286 gen_rtx_PLUS (mode,
6287 XEXP (arg0, 1),
6288 XEXP (arg1, 1))),
6289 benefit);
6291 case MINUS:
6292 /* Handle "a - b" as "a + b * (-1)". */
6293 return simplify_giv_expr (loop,
6294 gen_rtx_PLUS (mode,
6295 XEXP (x, 0),
6296 gen_rtx_MULT (mode,
6297 XEXP (x, 1),
6298 constm1_rtx)),
6299 benefit);
6301 case MULT:
6302 arg0 = simplify_giv_expr (loop, XEXP (x, 0), benefit);
6303 arg1 = simplify_giv_expr (loop, XEXP (x, 1), benefit);
6304 if (arg0 == 0 || arg1 == 0)
6305 return NULL_RTX;
6307 /* Put constant last, CONST_INT last if both constant. */
6308 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6309 && GET_CODE (arg1) != CONST_INT)
6310 tem = arg0, arg0 = arg1, arg1 = tem;
6312 /* If second argument is not now constant, not giv. */
6313 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6314 return NULL_RTX;
6316 /* Handle multiply by 0 or 1. */
6317 if (arg1 == const0_rtx)
6318 return const0_rtx;
6320 else if (arg1 == const1_rtx)
6321 return arg0;
6323 switch (GET_CODE (arg0))
6325 case REG:
6326 /* biv * invar. Done. */
6327 return gen_rtx_MULT (mode, arg0, arg1);
6329 case CONST_INT:
6330 /* Product of two constants. */
6331 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6333 case USE:
6334 /* invar * invar is a giv, but attempt to simplify it somehow. */
6335 if (GET_CODE (arg1) != CONST_INT)
6336 return NULL_RTX;
6338 arg0 = XEXP (arg0, 0);
6339 if (GET_CODE (arg0) == MULT)
6341 /* (invar_0 * invar_1) * invar_2. Associate. */
6342 return simplify_giv_expr (loop,
6343 gen_rtx_MULT (mode,
6344 XEXP (arg0, 0),
6345 gen_rtx_MULT (mode,
6346 XEXP (arg0,
6347 1),
6348 arg1)),
6349 benefit);
6351 /* Propagate the MULT expressions to the innermost nodes. */
6352 else if (GET_CODE (arg0) == PLUS)
6354 /* (invar_0 + invar_1) * invar_2. Distribute. */
6355 return simplify_giv_expr (loop,
6356 gen_rtx_PLUS (mode,
6357 gen_rtx_MULT (mode,
6358 XEXP (arg0,
6359 0),
6360 arg1),
6361 gen_rtx_MULT (mode,
6362 XEXP (arg0,
6363 1),
6364 arg1)),
6365 benefit);
6367 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6369 case MULT:
6370 /* (a * invar_1) * invar_2. Associate. */
6371 return simplify_giv_expr (loop,
6372 gen_rtx_MULT (mode,
6373 XEXP (arg0, 0),
6374 gen_rtx_MULT (mode,
6375 XEXP (arg0, 1),
6376 arg1)),
6377 benefit);
6379 case PLUS:
6380 /* (a + invar_1) * invar_2. Distribute. */
6381 return simplify_giv_expr (loop,
6382 gen_rtx_PLUS (mode,
6383 gen_rtx_MULT (mode,
6384 XEXP (arg0, 0),
6385 arg1),
6386 gen_rtx_MULT (mode,
6387 XEXP (arg0, 1),
6388 arg1)),
6389 benefit);
6391 default:
6392 abort ();
6395 case ASHIFT:
6396 /* Shift by constant is multiply by power of two. */
6397 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6398 return 0;
6400 return
6401 simplify_giv_expr (loop,
6402 gen_rtx_MULT (mode,
6403 XEXP (x, 0),
6404 GEN_INT ((HOST_WIDE_INT) 1
6405 << INTVAL (XEXP (x, 1)))),
6406 benefit);
6408 case NEG:
6409 /* "-a" is "a * (-1)" */
6410 return simplify_giv_expr (loop,
6411 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6412 benefit);
6414 case NOT:
6415 /* "~a" is "-a - 1". Silly, but easy. */
6416 return simplify_giv_expr (loop,
6417 gen_rtx_MINUS (mode,
6418 gen_rtx_NEG (mode, XEXP (x, 0)),
6419 const1_rtx),
6420 benefit);
6422 case USE:
6423 /* Already in proper form for invariant. */
6424 return x;
6426 case REG:
6427 /* If this is a new register, we can't deal with it. */
6428 if (REGNO (x) >= max_reg_before_loop)
6429 return 0;
6431 /* Check for biv or giv. */
6432 switch (REG_IV_TYPE (REGNO (x)))
6434 case BASIC_INDUCT:
6435 return x;
6436 case GENERAL_INDUCT:
6438 struct induction *v = REG_IV_INFO (REGNO (x));
6440 /* Form expression from giv and add benefit. Ensure this giv
6441 can derive another and subtract any needed adjustment if so. */
6442 *benefit += v->benefit;
6443 if (v->cant_derive)
6444 return 0;
6446 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6447 v->src_reg, v->mult_val),
6448 v->add_val);
6450 if (v->derive_adjustment)
6451 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6452 return simplify_giv_expr (loop, tem, benefit);
6455 default:
6456 /* If it isn't an induction variable, and it is invariant, we
6457 may be able to simplify things further by looking through
6458 the bits we just moved outside the loop. */
6459 if (loop_invariant_p (loop, x) == 1)
6461 struct movable *m;
6463 for (m = the_movables; m; m = m->next)
6464 if (rtx_equal_p (x, m->set_dest))
6466 /* Ok, we found a match. Substitute and simplify. */
6468 /* If we match another movable, we must use that, as
6469 this one is going away. */
6470 if (m->match)
6471 return simplify_giv_expr (loop, m->match->set_dest,
6472 benefit);
6474 /* If consec is non-zero, this is a member of a group of
6475 instructions that were moved together. We handle this
6476 case only to the point of seeking to the last insn and
6477 looking for a REG_EQUAL. Fail if we don't find one. */
6478 if (m->consec != 0)
6480 int i = m->consec;
6481 tem = m->insn;
6482 do { tem = NEXT_INSN (tem); } while (--i > 0);
6484 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6485 if (tem)
6486 tem = XEXP (tem, 0);
6488 else
6490 tem = single_set (m->insn);
6491 if (tem)
6492 tem = SET_SRC (tem);
6495 if (tem)
6497 /* What we are most interested in is pointer
6498 arithmetic on invariants -- only take
6499 patterns we may be able to do something with. */
6500 if (GET_CODE (tem) == PLUS
6501 || GET_CODE (tem) == MULT
6502 || GET_CODE (tem) == ASHIFT
6503 || GET_CODE (tem) == CONST_INT
6504 || GET_CODE (tem) == SYMBOL_REF)
6506 tem = simplify_giv_expr (loop, tem, benefit);
6507 if (tem)
6508 return tem;
6510 else if (GET_CODE (tem) == CONST
6511 && GET_CODE (XEXP (tem, 0)) == PLUS
6512 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6513 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6515 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6516 benefit);
6517 if (tem)
6518 return tem;
6521 break;
6524 break;
6527 /* Fall through to general case. */
6528 default:
6529 /* If invariant, return as USE (unless CONST_INT).
6530 Otherwise, not giv. */
6531 if (GET_CODE (x) == USE)
6532 x = XEXP (x, 0);
6534 if (loop_invariant_p (loop, x) == 1)
6536 if (GET_CODE (x) == CONST_INT)
6537 return x;
6538 if (GET_CODE (x) == CONST
6539 && GET_CODE (XEXP (x, 0)) == PLUS
6540 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6541 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6542 x = XEXP (x, 0);
6543 return gen_rtx_USE (mode, x);
6545 else
6546 return 0;
6550 /* This routine folds invariants such that there is only ever one
6551 CONST_INT in the summation. It is only used by simplify_giv_expr. */
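/* E.g. (an invented call): folding (plus (reg 70) (const_int 5)) with
   (const_int 3) yields (plus (reg 70) (const_int 8)). */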
6553 static rtx
6554 sge_plus_constant (x, c)
6555 rtx x, c;
6557 if (GET_CODE (x) == CONST_INT)
6558 return GEN_INT (INTVAL (x) + INTVAL (c));
6559 else if (GET_CODE (x) != PLUS)
6560 return gen_rtx_PLUS (GET_MODE (x), x, c);
6561 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6563 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6564 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6566 else if (GET_CODE (XEXP (x, 0)) == PLUS
6567 || GET_CODE (XEXP (x, 1)) != PLUS)
6569 return gen_rtx_PLUS (GET_MODE (x),
6570 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6572 else
6574 return gen_rtx_PLUS (GET_MODE (x),
6575 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6579 static rtx
6580 sge_plus (mode, x, y)
6581 enum machine_mode mode;
6582 rtx x, y;
6584 while (GET_CODE (y) == PLUS)
6586 rtx a = XEXP (y, 0);
6587 if (GET_CODE (a) == CONST_INT)
6588 x = sge_plus_constant (x, a);
6589 else
6590 x = gen_rtx_PLUS (mode, x, a);
6591 y = XEXP (y, 1);
6593 if (GET_CODE (y) == CONST_INT)
6594 x = sge_plus_constant (x, y);
6595 else
6596 x = gen_rtx_PLUS (mode, x, y);
6597 return x;
6600 /* Help detect a giv that is calculated by several consecutive insns;
6601 for example,
6602 giv = biv * M
6603 giv = giv + A
6604 The caller has already identified the first insn P as having a giv as dest;
6605 we check that all other insns that set the same register follow
6606 immediately after P, that they alter nothing else,
6607 and that the result of the last is still a giv.
6609 The value is 0 if the reg set in P is not really a giv.
6610 Otherwise, the value is the amount gained by eliminating
6611 all the consecutive insns that compute the value.
6613 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6614 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6616 The coefficients of the ultimate giv value are stored in
6617 *MULT_VAL and *ADD_VAL. */
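/* An invented instance: for the pair `g = b << 2;  g = g + base;' (with
   `base' loop-invariant) the ultimate giv has *MULT_VAL = (const_int 4)
   and *ADD_VAL = base, and the return value is the combined benefit of
   eliminating both insns. */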
6619 static int
6620 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6621 add_val, mult_val, last_consec_insn)
6622 const struct loop *loop;
6623 int first_benefit;
6624 rtx p;
6625 rtx src_reg;
6626 rtx dest_reg;
6627 rtx *add_val;
6628 rtx *mult_val;
6629 rtx *last_consec_insn;
6631 int count;
6632 enum rtx_code code;
6633 int benefit;
6634 rtx temp;
6635 rtx set;
6637 /* Indicate that this is a giv so that we can update the value produced in
6638 each insn of the multi-insn sequence.
6640 This induction structure will be used only by the call to
6641 general_induction_var below, so we can allocate it on our stack.
6642 If this is a giv, our caller will replace the induct var entry with
6643 a new induction structure. */
6644 struct induction *v
6645 = (struct induction *) alloca (sizeof (struct induction));
6646 v->src_reg = src_reg;
6647 v->mult_val = *mult_val;
6648 v->add_val = *add_val;
6649 v->benefit = first_benefit;
6650 v->cant_derive = 0;
6651 v->derive_adjustment = 0;
6653 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
6654 REG_IV_INFO (REGNO (dest_reg)) = v;
6656 count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
6658 while (count > 0)
6660 p = NEXT_INSN (p);
6661 code = GET_CODE (p);
6663 /* If libcall, skip to end of call sequence. */
6664 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6665 p = XEXP (temp, 0);
6667 if (code == INSN
6668 && (set = single_set (p))
6669 && GET_CODE (SET_DEST (set)) == REG
6670 && SET_DEST (set) == dest_reg
6671 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6672 add_val, mult_val, 0, &benefit, VOIDmode)
6673 /* Giv created by equivalent expression. */
6674 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6675 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6676 add_val, mult_val, 0, &benefit,
6677 VOIDmode)))
6678 && src_reg == v->src_reg)
6680 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6681 benefit += libcall_benefit (p);
6683 count--;
6684 v->mult_val = *mult_val;
6685 v->add_val = *add_val;
6686 v->benefit = benefit;
6688 else if (code != NOTE)
6690 /* Allow insns that set something other than this giv to a
6691 constant. Such insns are needed on machines which cannot
6692 include long constants and should not disqualify a giv. */
6693 if (code == INSN
6694 && (set = single_set (p))
6695 && SET_DEST (set) != dest_reg
6696 && CONSTANT_P (SET_SRC (set)))
6697 continue;
6699 REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT;
6700 return 0;
6704 *last_consec_insn = p;
6705 return v->benefit;
6708 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6709 represented by G1. If no such expression can be found, or it is clear that
6710 it cannot possibly be a valid address, 0 is returned.
6712 To perform the computation, we note that
6713 G1 = x * v + a and
6714 G2 = y * v + b
6715 where `v' is the biv.
6717 So G2 = (y/x) * G1 + (b - a*y/x).
6719 Note that MULT = y/x.
6721 Update: A and B are now allowed to be additive expressions such that
6722 B contains all variables in A. That is, computing B-A will not require
6723 subtracting variables. */
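/* A worked instance (numbers invented): with G1 = 2*v + 1 and
   G2 = 4*v + 6 we get MULT = y/x = 2 and G2 = 2*G1 + 4, since
   2*(2*v + 1) + 4 == 4*v + 6. */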
6725 static rtx
6726 express_from_1 (a, b, mult)
6727 rtx a, b, mult;
6729 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6731 if (mult == const0_rtx)
6732 return b;
6734 /* If MULT is not 1, we cannot handle A with non-constants, since we
6735 would then be required to subtract multiples of the registers in A.
6736 This is theoretically possible, and may even apply to some Fortran
6737 constructs, but it is a lot of work and we do not attempt it here. */
6739 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6740 return NULL_RTX;
6742 /* In general these structures are sorted top to bottom (down the PLUS
6743 chain), but not left to right across the PLUS. If B is a higher
6744 order giv than A, we can strip one level and recurse. If A is higher
6745 order, we'll eventually bail out, but won't know that until the end.
6746 If they are the same, we'll strip one level around this loop. */
6748 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6750 rtx ra, rb, oa, ob, tmp;
6752 ra = XEXP (a, 0), oa = XEXP (a, 1);
6753 if (GET_CODE (ra) == PLUS)
6754 tmp = ra, ra = oa, oa = tmp;
6756 rb = XEXP (b, 0), ob = XEXP (b, 1);
6757 if (GET_CODE (rb) == PLUS)
6758 tmp = rb, rb = ob, ob = tmp;
6760 if (rtx_equal_p (ra, rb))
6761 /* We matched: remove one reg completely. */
6762 a = oa, b = ob;
6763 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6764 /* An alternate match. */
6765 a = oa, b = rb;
6766 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6767 /* An alternate match. */
6768 a = ra, b = ob;
6769 else
6771 /* Indicates an extra register in B. Strip one level from B and
6772 recurse, hoping B was the higher order expression. */
6773 ob = express_from_1 (a, ob, mult);
6774 if (ob == NULL_RTX)
6775 return NULL_RTX;
6776 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6780 /* Here we are at the last level of A, go through the cases hoping to
6781 get rid of everything but a constant. */
6783 if (GET_CODE (a) == PLUS)
6785 rtx ra, oa;
6787 ra = XEXP (a, 0), oa = XEXP (a, 1);
6788 if (rtx_equal_p (oa, b))
6789 oa = ra;
6790 else if (!rtx_equal_p (ra, b))
6791 return NULL_RTX;
6793 if (GET_CODE (oa) != CONST_INT)
6794 return NULL_RTX;
6796 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6798 else if (GET_CODE (a) == CONST_INT)
6800 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6802 else if (CONSTANT_P (a))
6804 return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), const0_rtx, a);
6806 else if (GET_CODE (b) == PLUS)
6808 if (rtx_equal_p (a, XEXP (b, 0)))
6809 return XEXP (b, 1);
6810 else if (rtx_equal_p (a, XEXP (b, 1)))
6811 return XEXP (b, 0);
6812 else
6813 return NULL_RTX;
6815 else if (rtx_equal_p (a, b))
6816 return const0_rtx;
6818 return NULL_RTX;
6821 static rtx
6822 express_from (g1, g2)
6823 struct induction *g1, *g2;
6825 rtx mult, add;
6827 /* The value that G1 will be multiplied by must be a constant integer. Also,
6828 the only chance we have of getting a valid address is if y/x (see above
6829 for notation) is also an integer. */
6830 if (GET_CODE (g1->mult_val) == CONST_INT
6831 && GET_CODE (g2->mult_val) == CONST_INT)
6833 if (g1->mult_val == const0_rtx
6834 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6835 return NULL_RTX;
6836 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6838 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6839 mult = const1_rtx;
6840 else
6842 /* ??? Find out if one is a multiple of the other? */
6843 return NULL_RTX;
6846 add = express_from_1 (g1->add_val, g2->add_val, mult);
6847 if (add == NULL_RTX)
6849 /* Failed. If we've got a multiplication factor between G1 and G2,
6850 scale G1's addend and try again. */
6851 if (INTVAL (mult) > 1)
6853 rtx g1_add_val = g1->add_val;
6854 if (GET_CODE (g1_add_val) == MULT
6855 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6857 HOST_WIDE_INT m;
6858 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6859 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6860 XEXP (g1_add_val, 0), GEN_INT (m));
6862 else
6864 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6865 mult);
6868 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6871 if (add == NULL_RTX)
6872 return NULL_RTX;
6874 /* Form simplified final result. */
6875 if (mult == const0_rtx)
6876 return add;
6877 else if (mult == const1_rtx)
6878 mult = g1->dest_reg;
6879 else
6880 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6882 if (add == const0_rtx)
6883 return mult;
6884 else
6886 if (GET_CODE (add) == PLUS
6887 && CONSTANT_P (XEXP (add, 1)))
6889 rtx tem = XEXP (add, 1);
6890 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6891 add = tem;
6894 return gen_rtx_PLUS (g2->mode, mult, add);
6898 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6899 represented by G1. This indicates that G2 should be combined with G1 and
6900 that G2 can use (either directly or via an address expression) a register
6901 used to represent G1. */
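/* E.g. (invented): if G1 computes v*4 and G2 computes v*4 + 16, then
   express_from yields (plus G1 (const_int 16)); on a target with
   reg+offset addressing that is a valid address, so G2 can reuse G1's
   reduced register. */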
6903 static rtx
6904 combine_givs_p (g1, g2)
6905 struct induction *g1, *g2;
6907 rtx tem = express_from (g1, g2);
6909 /* If these givs are identical, they can be combined. We use the results
6910 of express_from because the addends are not in a canonical form, so
6911 rtx_equal_p is a weaker test. */
6912 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6913 combination to be the other way round. */
6914 if (tem == g1->dest_reg
6915 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6917 return g1->dest_reg;
6920 /* If G2 can be expressed as a function of G1 and that function is valid
6921 as an address and no more expensive than using a register for G2,
6922 the expression of G2 in terms of G1 can be used. */
6923 if (tem != NULL_RTX
6924 && g2->giv_type == DEST_ADDR
6925 && memory_address_p (g2->mem_mode, tem)
6926 /* ??? Loses, especially with -fforce-addr, where *g2->location
6927 will always be a register, and so anything more complicated
6928 gets discarded. */
6929 #if 0
6930 #ifdef ADDRESS_COST
6931 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6932 #else
6933 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6934 #endif
6935 #endif
6938 return tem;
6941 return NULL_RTX;
6944 struct combine_givs_stats
6946 int giv_number;
6947 int total_benefit;
6950 static int
6951 cmp_combine_givs_stats (xp, yp)
6952 const PTR xp;
6953 const PTR yp;
6955 const struct combine_givs_stats * const x =
6956 (const struct combine_givs_stats *) xp;
6957 const struct combine_givs_stats * const y =
6958 (const struct combine_givs_stats *) yp;
6959 int d;
6960 d = y->total_benefit - x->total_benefit;
6961 /* Stabilize the sort. */
6962 if (!d)
6963 d = x->giv_number - y->giv_number;
6964 return d;
6967 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6968 any other. If so, point SAME to the giv combined with and set NEW_REG to
6969 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6970 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
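/* Editorial note: the loop below (see the `restart' label) is greedy; it
   sorts candidates by total benefit, commits the best one, deducts the
   benefit of each newly combined giv from the remaining statistics, and
   then re-sorts from scratch. */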
6972 static void
6973 combine_givs (bl)
6974 struct iv_class *bl;
6976 /* Additional benefit to add for being combined multiple times. */
6977 const int extra_benefit = 3;
6979 struct induction *g1, *g2, **giv_array;
6980 int i, j, k, giv_count;
6981 struct combine_givs_stats *stats;
6982 rtx *can_combine;
6984 /* Count givs, because bl->giv_count is incorrect here. */
6985 giv_count = 0;
6986 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6987 if (!g1->ignore)
6988 giv_count++;
6990 giv_array
6991 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6992 i = 0;
6993 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6994 if (!g1->ignore)
6995 giv_array[i++] = g1;
6997 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6998 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7000 for (i = 0; i < giv_count; i++)
7002 int this_benefit;
7003 rtx single_use;
7005 g1 = giv_array[i];
7006 stats[i].giv_number = i;
7008 /* If a DEST_REG GIV is used only once, do not allow it to combine
7009 with anything, for in doing so we will gain nothing that cannot
7010 be had by simply letting the GIV with which we would have combined
7011 to be reduced on its own. The loss shows up in particular with
7012 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7013 be seen elsewhere as well. */
7014 if (g1->giv_type == DEST_REG
7015 && (single_use = VARRAY_RTX (reg_single_usage, REGNO (g1->dest_reg)))
7016 && single_use != const0_rtx)
7017 continue;
7019 this_benefit = g1->benefit;
7020 /* Add an additional weight for zero addends. */
7021 if (g1->no_const_addval)
7022 this_benefit += 1;
7024 for (j = 0; j < giv_count; j++)
7026 rtx this_combine;
7028 g2 = giv_array[j];
7029 if (g1 != g2
7030 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7032 can_combine[i * giv_count + j] = this_combine;
7033 this_benefit += g2->benefit + extra_benefit;
7036 stats[i].total_benefit = this_benefit;
7039 /* Iterate, combining until we can't. */
7040 restart:
7041 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7043 if (loop_dump_stream)
7045 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7046 for (k = 0; k < giv_count; k++)
7048 g1 = giv_array[stats[k].giv_number];
7049 if (!g1->combined_with && !g1->same)
7050 fprintf (loop_dump_stream, " {%d, %d}",
7051 INSN_UID (giv_array[stats[k].giv_number]->insn),
7052 stats[k].total_benefit);
7054 putc ('\n', loop_dump_stream);
7057 for (k = 0; k < giv_count; k++)
7059 int g1_add_benefit = 0;
7061 i = stats[k].giv_number;
7062 g1 = giv_array[i];
7064 /* If it has already been combined, skip. */
7065 if (g1->combined_with || g1->same)
7066 continue;
7068 for (j = 0; j < giv_count; j++)
7070 g2 = giv_array[j];
7071 if (g1 != g2 && can_combine[i * giv_count + j]
7072 /* If it has already been combined, skip. */
7073 && ! g2->same && ! g2->combined_with)
7075 int l;
7077 g2->new_reg = can_combine[i * giv_count + j];
7078 g2->same = g1;
7079 g1->combined_with++;
7080 g1->lifetime += g2->lifetime;
7082 g1_add_benefit += g2->benefit;
7084 /* ??? The new final_[bg]iv_value code does a much better job
7085 of finding replaceable giv's, and hence this code may no
7086 longer be necessary. */
7087 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7088 g1_add_benefit -= copy_cost;
7090 /* To help optimize the next set of combinations, remove
7091 this giv from the benefits of other potential mates. */
7092 for (l = 0; l < giv_count; ++l)
7094 int m = stats[l].giv_number;
7095 if (can_combine[m * giv_count + j])
7096 stats[l].total_benefit -= g2->benefit + extra_benefit;
7099 if (loop_dump_stream)
7100 fprintf (loop_dump_stream,
7101 "giv at %d combined with giv at %d\n",
7102 INSN_UID (g2->insn), INSN_UID (g1->insn));
7106 /* To help optimize the next set of combinations, remove
7107 this giv from the benefits of other potential mates. */
7108 if (g1->combined_with)
7110 for (j = 0; j < giv_count; ++j)
7112 int m = stats[j].giv_number;
7113 if (can_combine[m * giv_count + i])
7114 stats[j].total_benefit -= g1->benefit + extra_benefit;
7117 g1->benefit += g1_add_benefit;
7119 /* We've finished with this giv, and everything it touched.
7120 Restart the combination so that proper weights for the
7121 rest of the givs are properly taken into account. */
7122 /* ??? Ideally we would compact the arrays at this point, so
7123 as to not cover old ground. But sanely compacting
7124 can_combine is tricky. */
7125 goto restart;
7129 /* Clean up. */
7130 free (stats);
7131 free (can_combine);
7134 struct recombine_givs_stats
7136 int giv_number;
7137 int start_luid, end_luid;
7140 /* Used below as comparison function for qsort. We want an ascending luid
7141 when scanning the array starting at the end, thus the arguments are
7142 used in reverse. */
7143 static int
7144 cmp_recombine_givs_stats (xp, yp)
7145 const PTR xp;
7146 const PTR yp;
7148 const struct recombine_givs_stats * const x =
7149 (const struct recombine_givs_stats *) xp;
7150 const struct recombine_givs_stats * const y =
7151 (const struct recombine_givs_stats *) yp;
7152 int d;
7153 d = y->start_luid - x->start_luid;
7154 /* Stabilize the sort. */
7155 if (!d)
7156 d = y->giv_number - x->giv_number;
7157 return d;
7160 /* Scan X, which is a part of INSN, for the end of life of a giv. Also
7161 look for the start of life of a giv where the start has not been seen
7162 yet, in order to unlock the search for the end of its life.
7163 Only consider givs that belong to BIV.
7164 Return the total number of lifetime ends that have been found. */
7165 static int
7166 find_life_end (x, stats, insn, biv)
7167 rtx x, insn, biv;
7168 struct recombine_givs_stats *stats;
7170 enum rtx_code code;
7171 const char *fmt;
7172 int i, j;
7173 int retval;
7175 code = GET_CODE (x);
7176 switch (code)
7178 case SET:
7180 rtx reg = SET_DEST (x);
7181 if (GET_CODE (reg) == REG)
7183 int regno = REGNO (reg);
7184 struct induction *v = REG_IV_INFO (regno);
7186 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7187 && ! v->ignore
7188 && v->src_reg == biv
7189 && stats[v->ix].end_luid <= 0)
7191 /* If we see a 0 here for end_luid, it means that we have
7192 scanned the entire loop without finding any use at all.
7193 We must not predicate this code on a start_luid match
7194 since that would make the test fail for givs that have
7195 been hoisted out of inner loops. */
7196 if (stats[v->ix].end_luid == 0)
7198 stats[v->ix].end_luid = stats[v->ix].start_luid;
7199 return 1 + find_life_end (SET_SRC (x), stats, insn, biv);
7201 else if (stats[v->ix].start_luid == INSN_LUID (insn))
7202 stats[v->ix].end_luid = 0;
7204 return find_life_end (SET_SRC (x), stats, insn, biv);
7206 break;
7208 case REG:
7210 int regno = REGNO (x);
7211 struct induction *v = REG_IV_INFO (regno);
7213 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7214 && ! v->ignore
7215 && v->src_reg == biv
7216 && stats[v->ix].end_luid == 0)
7218 while (INSN_UID (insn) >= max_uid_for_loop)
7219 insn = NEXT_INSN (insn);
7220 stats[v->ix].end_luid = INSN_LUID (insn);
7221 return 1;
7223 return 0;
7225 case LABEL_REF:
7226 case CONST_DOUBLE:
7227 case CONST_INT:
7228 case CONST:
7229 return 0;
7230 default:
7231 break;
7233 fmt = GET_RTX_FORMAT (code);
7234 retval = 0;
7235 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7237 if (fmt[i] == 'e')
7238 retval += find_life_end (XEXP (x, i), stats, insn, biv);
7240 else if (fmt[i] == 'E')
7241 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7242 retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv);
7244 return retval;
7247 /* For each giv that has been combined with another, look if
7248 we can combine it with the most recently used one instead.
7249 This tends to shorten giv lifetimes, and helps the next step:
7250 try to derive givs from other givs. */
7251 static void
7252 recombine_givs (loop, bl, unroll_p)
7253 const struct loop *loop;
7254 struct iv_class *bl;
7255 int unroll_p;
7257 struct induction *v, **giv_array, *last_giv;
7258 struct recombine_givs_stats *stats;
7259 int giv_count;
7260 int i, rescan;
7261 int ends_need_computing;
7263 for (giv_count = 0, v = bl->giv; v; v = v->next_iv)
7265 if (! v->ignore)
7266 giv_count++;
7268 giv_array
7269 = (struct induction **) xmalloc (giv_count * sizeof (struct induction *));
7270 stats = (struct recombine_givs_stats *) xmalloc (giv_count * sizeof *stats);
7272 /* Initialize stats and set up the ix field for each giv in stats to name
7273 the corresponding index into stats. */
7274 for (i = 0, v = bl->giv; v; v = v->next_iv)
7276 rtx p;
7278 if (v->ignore)
7279 continue;
7280 giv_array[i] = v;
7281 stats[i].giv_number = i;
7282 /* If this giv has been hoisted out of an inner loop, use the luid of
7283 the previous insn. */
7284 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
7285 p = PREV_INSN (p);
7286 stats[i].start_luid = INSN_LUID (p);
7287 i++;
7290 qsort (stats, giv_count, sizeof (*stats), cmp_recombine_givs_stats);
7292 /* Set up the ix field for each giv in stats to name
7293 the corresponding index into stats, and
7294 do the actual most-recently-used recombination. */
7295 for (last_giv = 0, i = giv_count - 1; i >= 0; i--)
7297 v = giv_array[stats[i].giv_number];
7298 v->ix = i;
7299 if (v->same)
7301 struct induction *old_same = v->same;
7302 rtx new_combine;
7304 /* combine_givs_p actually says if we can make this transformation.
7305 The other tests are here only to avoid keeping a giv alive
7306 that could otherwise be eliminated. */
7307 if (last_giv
7308 && ((old_same->maybe_dead && ! old_same->combined_with)
7309 || ! last_giv->maybe_dead
7310 || last_giv->combined_with)
7311 && (new_combine = combine_givs_p (last_giv, v)))
7313 old_same->combined_with--;
7314 v->new_reg = new_combine;
7315 v->same = last_giv;
7316 last_giv->combined_with++;
7317 /* No need to update lifetimes / benefits here since we have
7318 already decided what to reduce. */
7320 if (loop_dump_stream)
7322 fprintf (loop_dump_stream,
7323 "giv at %d recombined with giv at %d as ",
7324 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7325 print_rtl (loop_dump_stream, v->new_reg);
7326 putc ('\n', loop_dump_stream);
7328 continue;
7330 v = v->same;
7332 else if (v->giv_type != DEST_REG)
7333 continue;
7334 if (! last_giv
7335 || (last_giv->maybe_dead && ! last_giv->combined_with)
7336 || ! v->maybe_dead
7337 || v->combined_with)
7338 last_giv = v;
7341 ends_need_computing = 0;
7342 /* For each DEST_REG giv, compute lifetime starts, and try to compute
7343 lifetime ends from regscan info. */
7344 for (i = giv_count - 1; i >= 0; i--)
7346 v = giv_array[stats[i].giv_number];
7347 if (v->ignore)
7348 continue;
7349 if (v->giv_type == DEST_ADDR)
7351 /* Loop unrolling of an inner loop can even create new DEST_REG
7352 givs. */
7353 rtx p;
7354 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop;)
7355 p = PREV_INSN (p);
7356 stats[i].start_luid = stats[i].end_luid = INSN_LUID (p);
7357 if (p != v->insn)
7358 stats[i].end_luid++;
7360 else /* v->giv_type == DEST_REG */
7362 if (v->last_use)
7364 stats[i].start_luid = INSN_LUID (v->insn);
7365 stats[i].end_luid = INSN_LUID (v->last_use);
7367 else if (INSN_UID (v->insn) >= max_uid_for_loop)
7369 rtx p;
7370 /* This insn has been created by loop optimization on an inner
7371 loop. We don't have a proper start_luid that will match
7372 when we see the first set. But we do know that there will
7373 be no use before the set, so we can set end_luid to 0 so that
7374 we'll start looking for the last use right away. */
7375 for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; )
7376 p = PREV_INSN (p);
7377 stats[i].start_luid = INSN_LUID (p);
7378 stats[i].end_luid = 0;
7379 ends_need_computing++;
7381 else
7383 int regno = REGNO (v->dest_reg);
7384 int count = VARRAY_INT (n_times_set, regno) - 1;
7385 rtx p = v->insn;
7387 /* Find the first insn that sets the giv, so that we can verify
7388 if this giv's lifetime wraps around the loop. We also need
7389 the luid of the first setting insn in order to detect the
7390 last use properly. */
7391 while (count)
7393 p = prev_nonnote_insn (p);
7394 if (reg_set_p (v->dest_reg, p))
7395 count--;
7398 stats[i].start_luid = INSN_LUID (p);
7399 if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)])
7401 stats[i].end_luid = -1;
7402 ends_need_computing++;
7404 else
7406 stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
7407 if (stats[i].end_luid > INSN_LUID (loop->end))
7409 stats[i].end_luid = -1;
7410 ends_need_computing++;
7417 /* If the regscan information was inconclusive for one or more DEST_REG
7418 givs, scan all the insns in the loop to find out the lifetime ends. */
7419 if (ends_need_computing)
7421 rtx biv = bl->biv->src_reg;
7422 rtx p = loop->end;
7426 if (p == loop->start)
7427 p = loop->end;
7428 p = PREV_INSN (p);
7429 if (! INSN_P (p))
7430 continue;
7431 ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv);
7433 while (ends_need_computing);
7436 /* Set start_luid back to the last insn that sets the giv. This allows
7437 more combinations. */
7438 for (i = giv_count - 1; i >= 0; i--)
7440 v = giv_array[stats[i].giv_number];
7441 if (v->ignore)
7442 continue;
7443 if (INSN_UID (v->insn) < max_uid_for_loop)
7444 stats[i].start_luid = INSN_LUID (v->insn);
7447 /* Now adjust lifetime ends by taking combined givs into account. */
7448 for (i = giv_count - 1; i >= 0; i--)
7450 unsigned luid;
7451 int j;
7453 v = giv_array[stats[i].giv_number];
7454 if (v->ignore)
7455 continue;
7456 if (v->same && ! v->same->ignore)
7458 j = v->same->ix;
7459 luid = stats[i].start_luid;
7460 /* Use unsigned arithmetic to model loop wrap-around. */
7461 if (luid - stats[j].start_luid
7462 > (unsigned) stats[j].end_luid - stats[j].start_luid)
7463 stats[j].end_luid = luid;
7467 qsort (stats, giv_count, sizeof (*stats), cmp_recombine_givs_stats);
7469 /* Try to derive DEST_REG givs from previous DEST_REG givs with the
7470 same mult_val and non-overlapping lifetime. This reduces register
7471 pressure.
7472 Once we find a DEST_REG giv that is suitable to derive others from,
7473 we set last_giv to this giv, and try to derive as many other DEST_REG
7474 givs from it without joining overlapping lifetimes. If we then
7475 encounter a DEST_REG giv that we can't derive, we set rescan to the
7476 index for this giv (unless rescan is already set).
7477 When we are finished with the current LAST_GIV (i.e. the inner loop
7478 terminates), we start again with rescan, which then becomes the new
7479 LAST_GIV. */
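/* A hypothetical sketch of the derivation step (values invented):
if g1 = biv * 4 + 16 dies before g2 = biv * 4 + 64 is set, the insn
setting g2 can be rewritten as g2 = g1 + 48. The mult_vals match and
the lifetimes do not overlap, so g1 and g2 can later be assigned the
same hard register, which is the register-pressure win noted above. */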
7480 for (i = giv_count - 1; i >= 0; i = rescan)
7482 int life_start = 0, life_end = 0;
7484 for (last_giv = 0, rescan = -1; i >= 0; i--)
7486 rtx sum;
7488 v = giv_array[stats[i].giv_number];
7489 if (v->giv_type != DEST_REG || v->derived_from || v->same)
7490 continue;
7491 if (! last_giv)
7493 /* Don't use a giv that's likely to be dead to derive
7494 others - that would be likely to keep that giv alive. */
7495 if (! v->maybe_dead || v->combined_with)
7497 last_giv = v;
7498 life_start = stats[i].start_luid;
7499 life_end = stats[i].end_luid;
7501 continue;
7503 /* Use unsigned arithmetic to model loop wrap-around. */
7504 if (((unsigned) stats[i].start_luid - life_start
7505 >= (unsigned) life_end - life_start)
7506 && ((unsigned) stats[i].end_luid - life_start
7507 > (unsigned) life_end - life_start)
7508 /* Check that the giv insn we're about to use for deriving
7509 precedes all uses of that giv. Note that initializing the
7510 derived giv would defeat the purpose of reducing register
7511 pressure.
7512 ??? We could arrange to move the insn. */
7513 && ((unsigned) stats[i].end_luid - INSN_LUID (loop->start)
7514 > (unsigned) stats[i].start_luid - INSN_LUID (loop->start))
7515 && rtx_equal_p (last_giv->mult_val, v->mult_val)
7516 /* ??? Could handle libcalls, but would need more logic. */
7517 && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
7518 /* We would really like to know if for any giv that v
7519 is combined with, v->insn or any intervening biv increment
7520 dominates that combined giv. However, we
7521 don't have this detailed control flow information.
7522 N.B. since last_giv will be reduced, it is valid
7523 anywhere in the loop, so we don't need to check the
7524 validity of last_giv.
7525 We rely here on the fact that v->always_executed implies that
7526 there is no jump to someplace else in the loop before the
7527 giv insn, and hence any insn that is executed before the
7528 giv insn in the loop will have a lower luid. */
7529 && (v->always_executed || ! v->combined_with)
7530 && (sum = express_from (last_giv, v))
7531 /* Make sure we don't make the add more expensive. ADD_COST
7532 doesn't take different costs of registers and constants into
7533 account, so compare the cost of the actual SET_SRCs. */
7534 && (rtx_cost (sum, SET)
7535 <= rtx_cost (SET_SRC (single_set (v->insn)), SET))
7536 /* ??? unroll can't understand anything but reg + const_int
7537 sums. It would be cleaner to fix unroll. */
7538 && ((GET_CODE (sum) == PLUS
7539 && GET_CODE (XEXP (sum, 0)) == REG
7540 && GET_CODE (XEXP (sum, 1)) == CONST_INT)
7541 || ! unroll_p)
7542 && validate_change (v->insn, &PATTERN (v->insn),
7543 gen_rtx_SET (VOIDmode, v->dest_reg, sum), 0))
7545 v->derived_from = last_giv;
7546 life_end = stats[i].end_luid;
7548 if (loop_dump_stream)
7550 fprintf (loop_dump_stream,
7551 "giv at %d derived from %d as ",
7552 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7553 print_rtl (loop_dump_stream, sum);
7554 putc ('\n', loop_dump_stream);
7557 else if (rescan < 0)
7558 rescan = i;
7562 /* Clean up. */
7563 free (giv_array);
7564 free (stats);
7567 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
7569 void
7570 emit_iv_add_mult (b, m, a, reg, insert_before)
7571 rtx b; /* initial value of basic induction variable */
7572 rtx m; /* multiplicative constant */
7573 rtx a; /* additive constant */
7574 rtx reg; /* destination register */
7575 rtx insert_before;
7577 rtx seq;
7578 rtx result;
7580 /* Prevent unexpected sharing of these rtx. */
7581 a = copy_rtx (a);
7582 b = copy_rtx (b);
7584 /* Increase the lifetime of any invariants that are moved further down in the code. */
7585 update_reg_last_use (a, insert_before);
7586 update_reg_last_use (b, insert_before);
7587 update_reg_last_use (m, insert_before);
7589 start_sequence ();
7590 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
7591 if (reg != result)
7592 emit_move_insn (reg, result);
7593 seq = gen_sequence ();
7594 end_sequence ();
7596 emit_insn_before (seq, insert_before);
7598 /* It is entirely possible that the expansion created lots of new
7599 registers. Iterate over the sequence we just created and
7600 record them all. */
7602 if (GET_CODE (seq) == SEQUENCE)
7604 int i;
7605 for (i = 0; i < XVECLEN (seq, 0); ++i)
7607 rtx set = single_set (XVECEXP (seq, 0, i));
7608 if (set && GET_CODE (SET_DEST (set)) == REG)
7609 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7612 else if (GET_CODE (seq) == SET
7613 && GET_CODE (SET_DEST (seq)) == REG)
7614 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7617 /* Test whether A * B can be computed without
7618 an actual multiply insn. Value is 1 if so. */
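/* For example, a multiplication by 5 is usually cheap, since
expand_mult can synthesize it as something like
tmp = a << 2;
tmp = tmp + a;
(the exact sequence is target-dependent), whereas a multiplication
by a large prime would typically need a real mult insn or a libcall
and so is rejected below. */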
7620 static int
7621 product_cheap_p (a, b)
7622 rtx a;
7623 rtx b;
7625 int i;
7626 rtx tmp;
7627 struct obstack *old_rtl_obstack = rtl_obstack;
7628 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
7629 int win = 1;
7631 /* If only one is constant, make it B. */
7632 if (GET_CODE (a) == CONST_INT)
7633 tmp = a, a = b, b = tmp;
7635 /* If the first operand is constant, both are constant, so we don't need a multiply. */
7636 if (GET_CODE (a) == CONST_INT)
7637 return 1;
7639 /* If the second operand is not constant, neither is, so we would need a multiply. */
7640 if (GET_CODE (b) != CONST_INT)
7641 return 0;
7643 /* One operand is constant, so we might not need a multiply insn. Generate
7644 the code for the multiply and see whether a call, a multiply, or a long
7645 sequence of insns is generated. */
7647 rtl_obstack = &temp_obstack;
7648 start_sequence ();
7649 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
7650 tmp = gen_sequence ();
7651 end_sequence ();
7653 if (GET_CODE (tmp) == SEQUENCE)
7655 if (XVEC (tmp, 0) == 0)
7656 win = 1;
7657 else if (XVECLEN (tmp, 0) > 3)
7658 win = 0;
7659 else
7660 for (i = 0; i < XVECLEN (tmp, 0); i++)
7662 rtx insn = XVECEXP (tmp, 0, i);
7664 if (GET_CODE (insn) != INSN
7665 || (GET_CODE (PATTERN (insn)) == SET
7666 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7667 || (GET_CODE (PATTERN (insn)) == PARALLEL
7668 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7669 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7671 win = 0;
7672 break;
7676 else if (GET_CODE (tmp) == SET
7677 && GET_CODE (SET_SRC (tmp)) == MULT)
7678 win = 0;
7679 else if (GET_CODE (tmp) == PARALLEL
7680 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7681 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7682 win = 0;
7684 /* Free any storage we obtained in generating this multiply and restore rtl
7685 allocation to its normal obstack. */
7686 obstack_free (&temp_obstack, storage);
7687 rtl_obstack = old_rtl_obstack;
7689 return win;
7692 /* Check to see if the loop can be terminated by a "decrement and branch until
7693 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7694 Also try reversing an increment loop to a decrement loop
7695 to see if the optimization can be performed.
7696 Value is nonzero if optimization was performed. */
7698 /* This is useful even if the architecture doesn't have such an insn,
7699 because it might change a loop which increments from 0 to n to a loop
7700 which decrements from n to 0. A loop that decrements to zero is usually
7701 faster than one that increments from zero. */
7703 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7704 such as approx_final_value, biv_total_increment, loop_iterations, and
7705 final_[bg]iv_value. */
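/* As a source-level sketch (purely illustrative), the reversal turns
for (i = 0; i < n; i++) for (i = n; i > 0; i--)
body_not_using_i (); into body_not_using_i ();
so the loop exit test compares against zero and can use a
decrement-and-branch insn where one exists. */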
7707 static int
7708 check_dbra_loop (loop, insn_count)
7709 struct loop *loop;
7710 int insn_count;
7712 struct iv_class *bl;
7713 rtx reg;
7714 rtx jump_label;
7715 rtx final_value;
7716 rtx start_value;
7717 rtx new_add_val;
7718 rtx comparison;
7719 rtx before_comparison;
7720 rtx p;
7721 rtx jump;
7722 rtx first_compare;
7723 int compare_and_branch;
7724 rtx loop_start = loop->start;
7725 rtx loop_end = loop->end;
7726 struct loop_info *loop_info = LOOP_INFO (loop);
7728 /* If last insn is a conditional branch, and the insn before tests a
7729 register value, try to optimize it. Otherwise, we can't do anything. */
7731 jump = PREV_INSN (loop_end);
7732 comparison = get_condition_for_loop (loop, jump);
7733 if (comparison == 0)
7734 return 0;
7735 if (!onlyjump_p (jump))
7736 return 0;
7738 /* Try to compute whether the compare/branch at the loop end is one or
7739 two instructions. */
7740 get_condition (jump, &first_compare);
7741 if (first_compare == jump)
7742 compare_and_branch = 1;
7743 else if (first_compare == prev_nonnote_insn (jump))
7744 compare_and_branch = 2;
7745 else
7746 return 0;
7749 /* If more than one condition is present to control the loop, then
7750 do not proceed, as this function does not know how to rewrite
7751 loop tests with more than one condition.
7753 Look backwards from the first insn in the last comparison
7754 sequence and see if we've got another comparison sequence. */
7756 rtx jump1;
7757 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7758 if (GET_CODE (jump1) == JUMP_INSN)
7759 return 0;
7762 /* Check all of the bivs to see if the compare uses one of them.
7763 Skip bivs set more than once because we can't guarantee that
7764 it will be zero on the last iteration. Also skip if the biv is
7765 used between its update and the test insn. */
7767 for (bl = loop_iv_list; bl; bl = bl->next)
7769 if (bl->biv_count == 1
7770 && ! bl->biv->maybe_multiple
7771 && bl->biv->dest_reg == XEXP (comparison, 0)
7772 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7773 first_compare))
7774 break;
7777 if (! bl)
7778 return 0;
7780 /* Look for the case where the basic induction variable is always
7781 nonnegative, and equals zero on the last iteration.
7782 In this case, add a reg_note REG_NONNEG, which allows the
7783 m68k DBRA instruction to be used. */
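/* An invented example of the case handled here: initial value 12,
add_val -4, exit test i != 0. The biv takes the values 12, 8, 4, 0,
so it is always nonnegative and is exactly zero on the last
iteration; 12 % 4 == 0 is what the check below verifies. */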
7785 if (((GET_CODE (comparison) == GT
7786 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7787 && INTVAL (XEXP (comparison, 1)) == -1)
7788 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7789 && GET_CODE (bl->biv->add_val) == CONST_INT
7790 && INTVAL (bl->biv->add_val) < 0)
7792 /* The initial value must be greater than 0, and
7793 init_val % -dec_value == 0 to ensure that it equals zero on
7794 the last iteration. */
7796 if (GET_CODE (bl->initial_value) == CONST_INT
7797 && INTVAL (bl->initial_value) > 0
7798 && (INTVAL (bl->initial_value)
7799 % (-INTVAL (bl->biv->add_val))) == 0)
7801 /* The register is always nonnegative; add a REG_NONNEG note to the branch. */
7802 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7803 REG_NOTES (jump)
7804 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7805 REG_NOTES (jump));
7806 bl->nonneg = 1;
7808 return 1;
7811 /* If the decrement is 1 and the value was tested as >= 0 before
7812 the loop, then we can safely optimize. */
7813 for (p = loop_start; p; p = PREV_INSN (p))
7815 if (GET_CODE (p) == CODE_LABEL)
7816 break;
7817 if (GET_CODE (p) != JUMP_INSN)
7818 continue;
7820 before_comparison = get_condition_for_loop (loop, p);
7821 if (before_comparison
7822 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7823 && GET_CODE (before_comparison) == LT
7824 && XEXP (before_comparison, 1) == const0_rtx
7825 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7826 && INTVAL (bl->biv->add_val) == -1)
7828 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7829 REG_NOTES (jump)
7830 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7831 REG_NOTES (jump));
7832 bl->nonneg = 1;
7834 return 1;
7838 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7839 && INTVAL (bl->biv->add_val) > 0)
7841 /* Try to change the increment to a decrement, so we can apply the above optimization. */
7842 /* We can do this if:
7843 all registers modified are induction variables or invariant,
7844 all memory references have non-overlapping addresses
7845 (obviously true if there is only one write),
7846 and we allow 2 insns for the compare/jump at the end of the loop. */
7847 /* Also, we must avoid any instructions which use both the reversed
7848 biv and another biv. Such instructions will fail if the loop is
7849 reversed. We meet this condition by requiring that either
7850 no_use_except_counting is true, or else that there is only
7851 one biv. */
7852 int num_nonfixed_reads = 0;
7853 /* 1 if the iteration var is used only to count iterations. */
7854 int no_use_except_counting = 0;
7855 /* 1 if the loop has no memory store, or it has a single memory store
7856 which is reversible. */
7857 int reversible_mem_store = 1;
7859 if (bl->giv_count == 0 && ! loop->exit_count)
7861 rtx bivreg = regno_reg_rtx[bl->regno];
7863 /* If there are no givs for this biv, and the only exit is the
7864 fall through at the end of the loop, then
7865 see if perhaps there are no uses except to count. */
7866 no_use_except_counting = 1;
7867 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7868 if (INSN_P (p))
7870 rtx set = single_set (p);
7872 if (set && GET_CODE (SET_DEST (set)) == REG
7873 && REGNO (SET_DEST (set)) == bl->regno)
7874 /* An insn that sets the biv is okay. */
7876 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7877 || p == prev_nonnote_insn (loop_end))
7878 && reg_mentioned_p (bivreg, PATTERN (p)))
7880 /* If either of these insns uses the biv and sets a pseudo
7881 that has more than one usage, then the biv has uses
7882 other than counting since it's used to derive a value
7883 that is used more than one time. */
7884 int note_set_pseudo_multiple_uses_retval = 0;
7885 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7886 &note_set_pseudo_multiple_uses_retval);
7887 if (note_set_pseudo_multiple_uses_retval)
7889 no_use_except_counting = 0;
7890 break;
7893 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7895 no_use_except_counting = 0;
7896 break;
7901 if (no_use_except_counting)
7902 /* No need to worry about MEMs. */
7904 else if (num_mem_sets <= 1)
7906 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7907 if (INSN_P (p))
7908 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7910 /* If the loop has a single store, and the destination address is
7911 invariant, then we can't reverse the loop, because this address
7912 might then have the wrong value at loop exit.
7913 This would work if the source were also invariant; however, in that
7914 case, the insn should have been moved out of the loop. */
7916 if (num_mem_sets == 1)
7918 struct induction *v;
7920 reversible_mem_store
7921 = (! unknown_address_altered
7922 && ! unknown_constant_address_altered
7923 && ! loop_invariant_p (loop,
7924 XEXP (XEXP (loop_store_mems, 0),
7925 0)));
7927 /* If the store depends on a register that is set after the
7928 store, it depends on the initial value, and is thus not
7929 reversible. */
7930 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7932 if (v->giv_type == DEST_REG
7933 && reg_mentioned_p (v->dest_reg,
7934 PATTERN (first_loop_store_insn))
7935 && loop_insn_first_p (first_loop_store_insn, v->insn))
7936 reversible_mem_store = 0;
7940 else
7941 return 0;
7943 /* This code only acts for innermost loops. Also it simplifies
7944 the memory address check by only reversing loops with
7945 zero or one memory access.
7946 Two memory accesses could involve parts of the same array,
7947 and that can't be reversed.
7948 If the biv is used only for counting, then we don't need to worry
7949 about all these things. */
7951 if ((num_nonfixed_reads <= 1
7952 && ! loop_info->has_call
7953 && ! loop_info->has_volatile
7954 && reversible_mem_store
7955 && (bl->giv_count + bl->biv_count + num_mem_sets
7956 + num_movables + compare_and_branch == insn_count)
7957 && (bl == loop_iv_list && bl->next == 0))
7958 || no_use_except_counting)
7960 rtx tem;
7962 /* Loop can be reversed. */
7963 if (loop_dump_stream)
7964 fprintf (loop_dump_stream, "Can reverse loop\n");
7966 /* Now check other conditions:
7968 The increment must be a constant, as must the initial value,
7969 and the comparison code must be LT.
7971 This test can probably be improved since +/- 1 in the constant
7972 can be obtained by changing LT to LE and vice versa; this is
7973 confusing. */
7975 if (comparison
7976 /* For constants, LE gets turned into LT. */
7977 && (GET_CODE (comparison) == LT
7978 || (GET_CODE (comparison) == LE
7979 && no_use_except_counting)))
7981 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7982 rtx initial_value, comparison_value;
7983 int nonneg = 0;
7984 enum rtx_code cmp_code;
7985 int comparison_const_width;
7986 unsigned HOST_WIDE_INT comparison_sign_mask;
7988 add_val = INTVAL (bl->biv->add_val);
7989 comparison_value = XEXP (comparison, 1);
7990 if (GET_MODE (comparison_value) == VOIDmode)
7991 comparison_const_width
7992 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7993 else
7994 comparison_const_width
7995 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7996 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7997 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7998 comparison_sign_mask
7999 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8001 /* If the comparison value is not a loop invariant, then we
8002 cannot reverse this loop.
8004 ??? If the insns which initialize the comparison value as
8005 a whole compute an invariant result, then we could move
8006 them out of the loop and proceed with loop reversal. */
8007 if (! loop_invariant_p (loop, comparison_value))
8008 return 0;
8010 if (GET_CODE (comparison_value) == CONST_INT)
8011 comparison_val = INTVAL (comparison_value);
8012 initial_value = bl->initial_value;
8014 /* Normalize the initial value if it is an integer and
8015 has no other use except as a counter. This will allow
8016 a few more loops to be reversed. */
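/* A worked example with invented values: for i = 3; i < 11; i += 2,
comparison_val becomes 11 - 3 = 8; rounding up to a multiple of
add_val leaves (8 + 2 - 1) - (9 % 2) = 8, so the loop is handled
below as if it were i = 0; i < 8; i += 2. */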
8017 if (no_use_except_counting
8018 && GET_CODE (comparison_value) == CONST_INT
8019 && GET_CODE (initial_value) == CONST_INT)
8021 comparison_val = comparison_val - INTVAL (bl->initial_value);
8022 /* The code below requires comparison_val to be a multiple
8023 of add_val in order to do the loop reversal, so
8024 round up comparison_val to a multiple of add_val.
8025 Since comparison_value is constant, we know that the
8026 current comparison code is LT. */
8027 comparison_val = comparison_val + add_val - 1;
8028 comparison_val
8029 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8030 /* We postpone overflow checks for COMPARISON_VAL here;
8031 even if there is an overflow, we might still be able to
8032 reverse the loop, if converting the loop exit test to
8033 NE is possible. */
8034 initial_value = const0_rtx;
8037 /* First check if we can do a vanilla loop reversal. */
8038 if (initial_value == const0_rtx
8039 /* If we have a decrement_and_branch_on_count,
8040 prefer the NE test, since this will allow that
8041 instruction to be generated. Note that we must
8042 use a vanilla loop reversal if the biv is used to
8043 calculate a giv or has a non-counting use. */
8044 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8045 && defined (HAVE_decrement_and_branch_on_count)
8046 && (! (add_val == 1 && loop->vtop
8047 && (bl->biv_count == 0
8048 || no_use_except_counting)))
8049 #endif
8050 && GET_CODE (comparison_value) == CONST_INT
8051 /* Now do postponed overflow checks on COMPARISON_VAL. */
8052 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8053 & comparison_sign_mask))
8055 /* Register will always be nonnegative, with value
8056 0 on the last iteration. */
8057 add_adjust = add_val;
8058 nonneg = 1;
8059 cmp_code = GE;
8061 else if (add_val == 1 && loop->vtop
8062 && (bl->biv_count == 0
8063 || no_use_except_counting))
8065 add_adjust = 0;
8066 cmp_code = NE;
8068 else
8069 return 0;
8071 if (GET_CODE (comparison) == LE)
8072 add_adjust -= add_val;
8074 /* If the initial value is not zero, or if the comparison
8075 value is not an exact multiple of the increment, then we
8076 cannot reverse this loop. */
8077 if (initial_value == const0_rtx
8078 && GET_CODE (comparison_value) == CONST_INT)
8080 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8081 return 0;
8083 else
8085 if (! no_use_except_counting || add_val != 1)
8086 return 0;
8089 final_value = comparison_value;
8091 /* Reset these in case we normalized the initial value
8092 and comparison value above. */
8093 if (GET_CODE (comparison_value) == CONST_INT
8094 && GET_CODE (initial_value) == CONST_INT)
8096 comparison_value = GEN_INT (comparison_val);
8097 final_value
8098 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8100 bl->initial_value = initial_value;
8102 /* Save some info needed to produce the new insns. */
8103 reg = bl->biv->dest_reg;
8104 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
8105 if (jump_label == pc_rtx)
8106 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
8107 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
8109 /* Set start_value; if this is not a CONST_INT, we need
8110 to generate a SUB.
8111 Initialize biv to start_value before loop start.
8112 The old initializing insn will be deleted as a
8113 dead store by flow.c. */
8114 if (initial_value == const0_rtx
8115 && GET_CODE (comparison_value) == CONST_INT)
8117 start_value = GEN_INT (comparison_val - add_adjust);
8118 emit_insn_before (gen_move_insn (reg, start_value),
8119 loop_start);
8121 else if (GET_CODE (initial_value) == CONST_INT)
8123 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8124 enum machine_mode mode = GET_MODE (reg);
8125 enum insn_code icode
8126 = add_optab->handlers[(int) mode].insn_code;
8128 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8129 || ! ((*insn_data[icode].operand[1].predicate)
8130 (comparison_value, mode))
8131 || ! ((*insn_data[icode].operand[2].predicate)
8132 (offset, mode)))
8133 return 0;
8134 start_value
8135 = gen_rtx_PLUS (mode, comparison_value, offset);
8136 emit_insn_before ((GEN_FCN (icode)
8137 (reg, comparison_value, offset)),
8138 loop_start);
8139 if (GET_CODE (comparison) == LE)
8140 final_value = gen_rtx_PLUS (mode, comparison_value,
8141 GEN_INT (add_val));
8143 else if (! add_adjust)
8145 enum machine_mode mode = GET_MODE (reg);
8146 enum insn_code icode
8147 = sub_optab->handlers[(int) mode].insn_code;
8148 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8149 || ! ((*insn_data[icode].operand[1].predicate)
8150 (comparison_value, mode))
8151 || ! ((*insn_data[icode].operand[2].predicate)
8152 (initial_value, mode)))
8153 return 0;
8154 start_value
8155 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8156 emit_insn_before ((GEN_FCN (icode)
8157 (reg, comparison_value, initial_value)),
8158 loop_start);
8160 else
8161 /* We could handle the other cases too, but it'll be
8162 better to have a testcase first. */
8163 return 0;
8165 /* We may not have a single insn which can increment a reg, so
8166 create a sequence to hold all the insns from expand_inc. */
8167 start_sequence ();
8168 expand_inc (reg, new_add_val);
8169 tem = gen_sequence ();
8170 end_sequence ();
8172 p = emit_insn_before (tem, bl->biv->insn);
8173 delete_insn (bl->biv->insn);
8175 /* Update biv info to reflect its new status. */
8176 bl->biv->insn = p;
8177 bl->initial_value = start_value;
8178 bl->biv->add_val = new_add_val;
8180 /* Update loop info. */
8181 loop_info->initial_value = reg;
8182 loop_info->initial_equiv_value = reg;
8183 loop_info->final_value = const0_rtx;
8184 loop_info->final_equiv_value = const0_rtx;
8185 loop_info->comparison_value = const0_rtx;
8186 loop_info->comparison_code = cmp_code;
8187 loop_info->increment = new_add_val;
8189 /* Inc LABEL_NUSES so that delete_insn will
8190 not delete the label. */
8191 LABEL_NUSES (XEXP (jump_label, 0)) ++;
8193 /* Emit an insn after the end of the loop to set the biv's
8194 proper exit value if it is used anywhere outside the loop. */
8195 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8196 || ! bl->init_insn
8197 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8198 emit_insn_after (gen_move_insn (reg, final_value),
8199 loop_end);
8201 /* Delete compare/branch at end of loop. */
8202 delete_insn (PREV_INSN (loop_end));
8203 if (compare_and_branch == 2)
8204 delete_insn (first_compare);
8206 /* Add new compare/branch insn at end of loop. */
8207 start_sequence ();
8208 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8209 GET_MODE (reg), 0, 0,
8210 XEXP (jump_label, 0));
8211 tem = gen_sequence ();
8212 end_sequence ();
8213 emit_jump_insn_before (tem, loop_end);
8215 for (tem = PREV_INSN (loop_end);
8216 tem && GET_CODE (tem) != JUMP_INSN;
8217 tem = PREV_INSN (tem))
8220 if (tem)
8221 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8223 if (nonneg)
8225 if (tem)
8227 /* Increment of LABEL_NUSES done above. */
8228 /* Register is now always nonnegative,
8229 so add REG_NONNEG note to the branch. */
8230 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8231 REG_NOTES (tem));
8233 bl->nonneg = 1;
8236 /* No insn may reference both the reversed and another biv or it
8237 will fail (see comment near the top of the loop reversal
8238 code).
8239 Earlier on, we have verified that the biv has no use except
8240 counting, or it is the only biv in this function.
8241 However, the code that computes no_use_except_counting does
8242 not verify reg notes. It's possible to have an insn that
8243 references another biv, and has a REG_EQUAL note with an
8244 expression based on the reversed biv. To avoid this case,
8245 remove all REG_EQUAL notes based on the reversed biv
8246 here. */
8247 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8248 if (INSN_P (p))
8250 rtx *pnote;
8251 rtx set = single_set (p);
8252 /* If this is a set of a GIV based on the reversed biv, any
8253 REG_EQUAL notes should still be correct. */
8254 if (! set
8255 || GET_CODE (SET_DEST (set)) != REG
8256 || (size_t) REGNO (SET_DEST (set)) >= reg_iv_type->num_elements
8257 || REG_IV_TYPE (REGNO (SET_DEST (set))) != GENERAL_INDUCT
8258 || REG_IV_INFO (REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8259 for (pnote = &REG_NOTES (p); *pnote;)
8261 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8262 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8263 XEXP (*pnote, 0)))
8264 *pnote = XEXP (*pnote, 1);
8265 else
8266 pnote = &XEXP (*pnote, 1);
8270 /* Mark that this biv has been reversed. Each giv which depends
8271 on this biv, and which is also live past the end of the loop,
8272 will have to be fixed up. */
8274 bl->reversed = 1;
8276 if (loop_dump_stream)
8278 fprintf (loop_dump_stream, "Reversed loop");
8279 if (bl->nonneg)
8280 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8281 else
8282 fprintf (loop_dump_stream, "\n");
8285 return 1;
8290 return 0;
8293 /* Verify whether the biv BL appears to be eliminable,
8294 based on the insns in the loop that refer to it.
8296 If ELIMINATE_P is non-zero, actually do the elimination.
8298 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8299 determine whether invariant insns should be placed inside or at the
8300 start of the loop. */
8302 static int
8303 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8304 const struct loop *loop;
8305 struct iv_class *bl;
8306 int eliminate_p;
8307 int threshold, insn_count;
8309 rtx reg = bl->biv->dest_reg;
8310 rtx loop_start = loop->start;
8311 rtx loop_end = loop->end;
8312 rtx p;
8314 /* Scan all insns in the loop, stopping if we find one that uses the
8315 biv in a way that we cannot eliminate. */
8317 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8319 enum rtx_code code = GET_CODE (p);
8320 rtx where = threshold >= insn_count ? loop_start : p;
8322 /* If this is a libcall that sets a giv, skip ahead to its end. */
8323 if (GET_RTX_CLASS (code) == 'i')
8325 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8327 if (note)
8329 rtx last = XEXP (note, 0);
8330 rtx set = single_set (last);
8332 if (set && GET_CODE (SET_DEST (set)) == REG)
8334 unsigned int regno = REGNO (SET_DEST (set));
8336 if (regno < max_reg_before_loop
8337 && REG_IV_TYPE (regno) == GENERAL_INDUCT
8338 && REG_IV_INFO (regno)->src_reg == bl->biv->src_reg)
8339 p = last;
8343 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8344 && reg_mentioned_p (reg, PATTERN (p))
8345 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8346 eliminate_p, where))
8348 if (loop_dump_stream)
8349 fprintf (loop_dump_stream,
8350 "Cannot eliminate biv %d: biv used in insn %d.\n",
8351 bl->regno, INSN_UID (p));
8352 break;
8356 if (p == loop_end)
8358 if (loop_dump_stream)
8359 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8360 bl->regno, eliminate_p ? "was" : "can be");
8361 return 1;
8364 return 0;
8367 /* INSN and REFERENCE are instructions in the same insn chain.
8368 Return non-zero if INSN is first. */
8371 loop_insn_first_p (insn, reference)
8372 rtx insn, reference;
8374 rtx p, q;
8376 for (p = insn, q = reference;;)
8378 /* Start with test for not first so that INSN == REFERENCE yields not
8379 first. */
8380 if (q == insn || ! p)
8381 return 0;
8382 if (p == reference || ! q)
8383 return 1;
8385 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8386 previous insn, hence the <= comparison below does not work if
8387 P is a note. */
8388 if (INSN_UID (p) < max_uid_for_loop
8389 && INSN_UID (q) < max_uid_for_loop
8390 && GET_CODE (p) != NOTE)
8391 return INSN_LUID (p) <= INSN_LUID (q);
8393 if (INSN_UID (p) >= max_uid_for_loop
8394 || GET_CODE (p) == NOTE)
8395 p = NEXT_INSN (p);
8396 if (INSN_UID (q) >= max_uid_for_loop)
8397 q = NEXT_INSN (q);
8401 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8402 the offset that we have to take into account due to auto-increment /
8403 giv derivation is zero. */
8404 static int
8405 biv_elimination_giv_has_0_offset (biv, giv, insn)
8406 struct induction *biv, *giv;
8407 rtx insn;
8409 /* If the giv V had the auto-inc address optimization applied
8410 to it, and INSN occurs between the giv insn and the biv
8411 insn, then we'd have to adjust the value used here.
8412 This is rare, so we don't bother to make this possible. */
8413 if (giv->auto_inc_opt
8414 && ((loop_insn_first_p (giv->insn, insn)
8415 && loop_insn_first_p (insn, biv->insn))
8416 || (loop_insn_first_p (biv->insn, insn)
8417 && loop_insn_first_p (insn, giv->insn))))
8418 return 0;
8420 /* If the giv V was derived from another giv, and INSN does
8421 not occur between the giv insn and the biv insn, then we'd
8422 have to adjust the value used here. This is rare, so we don't
8423 bother to make this possible. */
8424 if (giv->derived_from
8425 && ! (giv->always_executed
8426 && loop_insn_first_p (giv->insn, insn)
8427 && loop_insn_first_p (insn, biv->insn)))
8428 return 0;
8429 if (giv->same
8430 && giv->same->derived_from
8431 && ! (giv->same->always_executed
8432 && loop_insn_first_p (giv->same->insn, insn)
8433 && loop_insn_first_p (insn, biv->insn)))
8434 return 0;
8436 return 1;
8439 /* If BL appears in X (part of the pattern of INSN), see if we can
8440 eliminate its use. If so, return 1. If not, return 0.
8442 If BIV does not appear in X, return 1.
8444 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
8445 where extra insns should be added. Depending on how many items have been
8446 moved out of the loop, it will either be before INSN or at the start of
8447 the loop. */
8449 static int
8450 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where)
8451 const struct loop *loop;
8452 rtx x, insn;
8453 struct iv_class *bl;
8454 int eliminate_p;
8455 rtx where;
8457 enum rtx_code code = GET_CODE (x);
8458 rtx reg = bl->biv->dest_reg;
8459 enum machine_mode mode = GET_MODE (reg);
8460 struct induction *v;
8461 rtx arg, tem;
8462 #ifdef HAVE_cc0
8463 rtx new;
8464 #endif
8465 int arg_operand;
8466 const char *fmt;
8467 int i, j;
8469 switch (code)
8471 case REG:
8472 /* If we haven't already been able to do something with this BIV,
8473 we can't eliminate it. */
8474 if (x == reg)
8475 return 0;
8476 return 1;
8478 case SET:
8479 /* If this sets the BIV, it is not a problem. */
8480 if (SET_DEST (x) == reg)
8481 return 1;
8483 /* If this is an insn that defines a giv, it is also ok because
8484 it will go away when the giv is reduced. */
8485 for (v = bl->giv; v; v = v->next_iv)
8486 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8487 return 1;
8489 #ifdef HAVE_cc0
8490 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8492 /* Can replace with any giv that was reduced and
8493 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8494 Require a constant for MULT_VAL, so we know it's nonzero.
8495 ??? We disable this optimization to avoid potential
8496 overflows. */
8498 for (v = bl->giv; v; v = v->next_iv)
8499 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8500 && v->add_val == const0_rtx
8501 && ! v->ignore && ! v->maybe_dead && v->always_computable
8502 && v->mode == mode
8503 && 0)
8505 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8506 continue;
8508 if (! eliminate_p)
8509 return 1;
8511 /* If the giv has the opposite direction of change,
8512 then reverse the comparison. */
8513 if (INTVAL (v->mult_val) < 0)
8514 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8515 const0_rtx, v->new_reg);
8516 else
8517 new = v->new_reg;
8519 /* We can probably test that giv's reduced reg. */
8520 if (validate_change (insn, &SET_SRC (x), new, 0))
8521 return 1;
8524 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8525 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8526 Require a constant for MULT_VAL, so we know it's nonzero.
8527 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8528 overflow problem. */
8530 for (v = bl->giv; v; v = v->next_iv)
8531 if (GET_CODE (v->mult_val) == CONST_INT
8532 && v->mult_val != const0_rtx
8533 && ! v->ignore && ! v->maybe_dead && v->always_computable
8534 && v->mode == mode
8535 && (GET_CODE (v->add_val) == SYMBOL_REF
8536 || GET_CODE (v->add_val) == LABEL_REF
8537 || GET_CODE (v->add_val) == CONST
8538 || (GET_CODE (v->add_val) == REG
8539 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
8541 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8542 continue;
8544 if (! eliminate_p)
8545 return 1;
8547 /* If the giv has the opposite direction of change,
8548 then reverse the comparison. */
8549 if (INTVAL (v->mult_val) < 0)
8550 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8551 v->new_reg);
8552 else
8553 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8554 copy_rtx (v->add_val));
8556 /* Replace biv with the giv's reduced register. */
8557 update_reg_last_use (v->add_val, insn);
8558 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8559 return 1;
8561 /* Insn doesn't support that constant or invariant. Copy it
8562 into a register (it will be a loop invariant). */
8563 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8565 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
8566 where);
8568 /* Substitute the new register for its invariant value in
8569 the compare expression. */
8570 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8571 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8572 return 1;
8575 #endif
8576 break;
8578 case COMPARE:
8579 case EQ: case NE:
8580 case GT: case GE: case GTU: case GEU:
8581 case LT: case LE: case LTU: case LEU:
8582 /* See if either argument is the biv. */
8583 if (XEXP (x, 0) == reg)
8584 arg = XEXP (x, 1), arg_operand = 1;
8585 else if (XEXP (x, 1) == reg)
8586 arg = XEXP (x, 0), arg_operand = 0;
8587 else
8588 break;
8590 if (CONSTANT_P (arg))
8592 /* First try to replace with any giv that has constant positive
8593 mult_val and constant add_val. We might be able to support
8594 negative mult_val, but it seems complex to do it in general. */
8596 for (v = bl->giv; v; v = v->next_iv)
8597 if (GET_CODE (v->mult_val) == CONST_INT
8598 && INTVAL (v->mult_val) > 0
8599 && (GET_CODE (v->add_val) == SYMBOL_REF
8600 || GET_CODE (v->add_val) == LABEL_REF
8601 || GET_CODE (v->add_val) == CONST
8602 || (GET_CODE (v->add_val) == REG
8603 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
8604 && ! v->ignore && ! v->maybe_dead && v->always_computable
8605 && v->mode == mode)
8607 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8608 continue;
8610 if (! eliminate_p)
8611 return 1;
8613 /* Replace biv with the giv's reduced reg. */
8614 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8616 /* If all constants are actually constant integers and
8617 the derived constant can be directly placed in the COMPARE,
8618 do so. */
8619 if (GET_CODE (arg) == CONST_INT
8620 && GET_CODE (v->mult_val) == CONST_INT
8621 && GET_CODE (v->add_val) == CONST_INT)
8623 validate_change (insn, &XEXP (x, arg_operand),
8624 GEN_INT (INTVAL (arg)
8625 * INTVAL (v->mult_val)
8626 + INTVAL (v->add_val)), 1);
8628 else
8630 /* Otherwise, load it into a register. */
8631 tem = gen_reg_rtx (mode);
8632 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8633 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8635 if (apply_change_group ())
8636 return 1;
8639 /* Look for giv with positive constant mult_val and nonconst add_val.
8640 Insert insns to calculate new compare value.
8641 ??? Turn this off due to possible overflow. */
8643 for (v = bl->giv; v; v = v->next_iv)
8644 if (GET_CODE (v->mult_val) == CONST_INT
8645 && INTVAL (v->mult_val) > 0
8646 && ! v->ignore && ! v->maybe_dead && v->always_computable
8647 && v->mode == mode
8648 && 0)
8650 rtx tem;
8652 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8653 continue;
8655 if (! eliminate_p)
8656 return 1;
8658 tem = gen_reg_rtx (mode);
8660 /* Replace biv with giv's reduced register. */
8661 validate_change (insn, &XEXP (x, 1 - arg_operand),
8662 v->new_reg, 1);
8664 /* Compute value to compare against. */
8665 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8666 /* Use it in this insn. */
8667 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8668 if (apply_change_group ())
8669 return 1;
8672 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8674 if (loop_invariant_p (loop, arg) == 1)
8676 /* Look for giv with constant positive mult_val and nonconst
8677 add_val. Insert insns to compute new compare value.
8678 ??? Turn this off due to possible overflow. */
8680 for (v = bl->giv; v; v = v->next_iv)
8681 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8682 && ! v->ignore && ! v->maybe_dead && v->always_computable
8683 && v->mode == mode
8684 && 0)
8686 rtx tem;
8688 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8689 continue;
8691 if (! eliminate_p)
8692 return 1;
8694 tem = gen_reg_rtx (mode);
8696 /* Replace biv with giv's reduced register. */
8697 validate_change (insn, &XEXP (x, 1 - arg_operand),
8698 v->new_reg, 1);
8700 /* Compute value to compare against. */
8701 emit_iv_add_mult (arg, v->mult_val, v->add_val,
8702 tem, where);
8703 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8704 if (apply_change_group ())
8705 return 1;
8709 /* This code has problems. Basically, you can't know when
8710 seeing if we will eliminate BL, whether a particular giv
8711 of ARG will be reduced. If it isn't going to be reduced,
8712 we can't eliminate BL. We can try forcing it to be reduced,
8713 but that can generate poor code.
8715 The problem is that the benefit of reducing TV, below, should
8716 be increased if BL can actually be eliminated, but this means
8717 we might have to do a topological sort of the order in which
8718 we try to process biv. It doesn't seem worthwhile to do
8719 this sort of thing now. */
8721 #if 0
8722 /* Otherwise the reg compared with had better be a biv. */
8723 if (GET_CODE (arg) != REG
8724 || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT)
8725 return 0;
8727 /* Look for a pair of givs, one for each biv,
8728 with identical coefficients. */
8729 for (v = bl->giv; v; v = v->next_iv)
8731 struct induction *tv;
8733 if (v->ignore || v->maybe_dead || v->mode != mode)
8734 continue;
8736 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
8737 if (! tv->ignore && ! tv->maybe_dead
8738 && rtx_equal_p (tv->mult_val, v->mult_val)
8739 && rtx_equal_p (tv->add_val, v->add_val)
8740 && tv->mode == mode)
8742 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8743 continue;
8745 if (! eliminate_p)
8746 return 1;
8748 /* Replace biv with its giv's reduced reg. */
8749 XEXP (x, 1 - arg_operand) = v->new_reg;
8750 /* Replace other operand with the other giv's
8751 reduced reg. */
8752 XEXP (x, arg_operand) = tv->new_reg;
8753 return 1;
8756 #endif
8759 /* If we get here, the biv can't be eliminated. */
8760 return 0;
8762 case MEM:
8763 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8764 biv is used in it, since it will be replaced. */
8765 for (v = bl->giv; v; v = v->next_iv)
8766 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8767 return 1;
8768 break;
8770 default:
8771 break;
8774 /* See if any subexpression fails elimination. */
8775 fmt = GET_RTX_FORMAT (code);
8776 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8778 switch (fmt[i])
8780 case 'e':
8781 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8782 eliminate_p, where))
8783 return 0;
8784 break;
8786 case 'E':
8787 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8788 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8789 eliminate_p, where))
8790 return 0;
8791 break;
8795 return 1;
8798 /* Return nonzero if the last use of REG
8799 is in an insn following INSN in the same basic block. */
8801 static int
8802 last_use_this_basic_block (reg, insn)
8803 rtx reg;
8804 rtx insn;
8806 rtx n;
8807 for (n = insn;
8808 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8809 n = NEXT_INSN (n))
8811 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8812 return 1;
8814 return 0;
8817 /* Called via `note_stores' to record the initial value of a biv. Here we
8818 just record the location of the set and process it later. */
8820 static void
8821 record_initial (dest, set, data)
8822 rtx dest;
8823 rtx set;
8824 void *data ATTRIBUTE_UNUSED;
8826 struct iv_class *bl;
8828 if (GET_CODE (dest) != REG
8829 || REGNO (dest) >= max_reg_before_loop
8830 || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT)
8831 return;
8833 bl = reg_biv_class[REGNO (dest)];
8835 /* If this is the first set found, record it. */
8836 if (bl->init_insn == 0)
8838 bl->init_insn = note_insn;
8839 bl->init_set = set;
8843 /* If any of the registers in X are "old" and currently have a last use earlier
8844 than INSN, update them to have a last use of INSN. Their actual last use
8845 will be the previous insn but it will not have a valid uid_luid so we can't
8846 use it. */
8848 static void
8849 update_reg_last_use (x, insn)
8850 rtx x;
8851 rtx insn;
8853 /* Check for the case where INSN does not have a valid luid. In this case,
8854 there is no need to modify the regno_last_uid, as this can only happen
8855 when code is inserted after the loop_end to set a pseudo's final value,
8856 and hence this insn will never be the last use of x. */
8857 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8858 && INSN_UID (insn) < max_uid_for_loop
8859 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
8860 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8861 else
8863 register int i, j;
8864 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8865 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8867 if (fmt[i] == 'e')
8868 update_reg_last_use (XEXP (x, i), insn);
8869 else if (fmt[i] == 'E')
8870 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8871 update_reg_last_use (XVECEXP (x, i, j), insn);
8876 /* Given an insn INSN and condition COND, return the condition in a
8877 canonical form to simplify testing by callers. Specifically:
8879 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8880 (2) Both operands will be machine operands; (cc0) will have been replaced.
8881 (3) If an operand is a constant, it will be the second operand.
8882 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8883 for GE, GEU, and LEU.
8885 If the condition cannot be understood, or is an inequality floating-point
8886 comparison which needs to be reversed, 0 will be returned.
8888 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
8890 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8891 insn used in locating the condition was found. If a replacement test
8892 of the condition is desired, it should be placed in front of that
8893 insn and we will be sure that the inputs are still valid.
8895 If WANT_REG is non-zero, we wish the condition to be relative to that
8896 register, if possible. Therefore, do not canonicalize the condition
8897 further. */
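/* Two invented examples: (le (reg 65) (const_int 4)) is returned as
(lt (reg 65) (const_int 5)) per rule (4) above, and
(gt (const_int 0) (reg 65)) is returned as (lt (reg 65) (const_int 0))
per rule (3), the constant having been swapped to the second operand. */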
8900 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8901 rtx insn;
8902 rtx cond;
8903 int reverse;
8904 rtx *earliest;
8905 rtx want_reg;
8907 enum rtx_code code;
8908 rtx prev = insn;
8909 rtx set;
8910 rtx tem;
8911 rtx op0, op1;
8912 int reverse_code = 0;
8913 int did_reverse_condition = 0;
8914 enum machine_mode mode;
8916 code = GET_CODE (cond);
8917 mode = GET_MODE (cond);
8918 op0 = XEXP (cond, 0);
8919 op1 = XEXP (cond, 1);
8921 if (reverse)
8923 code = reverse_condition (code);
8924 did_reverse_condition ^= 1;
8927 if (earliest)
8928 *earliest = insn;
8930 /* If we are comparing a register with zero, see if the register is set
8931 in the previous insn to a COMPARE or a comparison operation. Perform
8932 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8933 in cse.c. */
8935 while (GET_RTX_CLASS (code) == '<'
8936 && op1 == CONST0_RTX (GET_MODE (op0))
8937 && op0 != want_reg)
8939 /* Set non-zero when we find something of interest. */
8940 rtx x = 0;
8942 #ifdef HAVE_cc0
8943 /* If comparison with cc0, import actual comparison from compare
8944 insn. */
8945 if (op0 == cc0_rtx)
8947 if ((prev = prev_nonnote_insn (prev)) == 0
8948 || GET_CODE (prev) != INSN
8949 || (set = single_set (prev)) == 0
8950 || SET_DEST (set) != cc0_rtx)
8951 return 0;
8953 op0 = SET_SRC (set);
8954 op1 = CONST0_RTX (GET_MODE (op0));
8955 if (earliest)
8956 *earliest = prev;
8958 #endif
8960 /* If this is a COMPARE, pick up the two things being compared. */
8961 if (GET_CODE (op0) == COMPARE)
8963 op1 = XEXP (op0, 1);
8964 op0 = XEXP (op0, 0);
8965 continue;
8967 else if (GET_CODE (op0) != REG)
8968 break;
8970 /* Go back to the previous insn. Stop if it is not an INSN. We also
8971 stop if it isn't a single set or if it has a REG_INC note because
8972 we don't want to bother dealing with it. */
8974 if ((prev = prev_nonnote_insn (prev)) == 0
8975 || GET_CODE (prev) != INSN
8976 || FIND_REG_INC_NOTE (prev, 0)
8977 || (set = single_set (prev)) == 0)
8978 break;
8980 /* If this is setting OP0, get what it sets it to if it looks
8981 relevant. */
8982 if (rtx_equal_p (SET_DEST (set), op0))
8984 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8986 /* ??? We may not combine comparisons done in a CCmode with
8987 comparisons not done in a CCmode. This is to aid targets
8988 like Alpha that have an IEEE compliant EQ instruction, and
8989 a non-IEEE compliant BEQ instruction. The use of CCmode is
8990 actually artificial, simply to prevent the combination, but
8991 should not affect other platforms.
8993 However, we must allow VOIDmode comparisons to match either
8994 CCmode or non-CCmode comparison, because some ports have
8995 modeless comparisons inside branch patterns.
8997 ??? This mode check should perhaps look more like the mode check
8998 in simplify_comparison in combine. */
9000 if ((GET_CODE (SET_SRC (set)) == COMPARE
9001 || (((code == NE
9002 || (code == LT
9003 && GET_MODE_CLASS (inner_mode) == MODE_INT
9004 && (GET_MODE_BITSIZE (inner_mode)
9005 <= HOST_BITS_PER_WIDE_INT)
9006 && (STORE_FLAG_VALUE
9007 & ((HOST_WIDE_INT) 1
9008 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9009 #ifdef FLOAT_STORE_FLAG_VALUE
9010 || (code == LT
9011 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9012 && (REAL_VALUE_NEGATIVE
9013 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9014 #endif
9016 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9017 && (((GET_MODE_CLASS (mode) == MODE_CC)
9018 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9019 || mode == VOIDmode || inner_mode == VOIDmode))
9020 x = SET_SRC (set);
9021 else if (((code == EQ
9022 || (code == GE
9023 && (GET_MODE_BITSIZE (inner_mode)
9024 <= HOST_BITS_PER_WIDE_INT)
9025 && GET_MODE_CLASS (inner_mode) == MODE_INT
9026 && (STORE_FLAG_VALUE
9027 & ((HOST_WIDE_INT) 1
9028 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9029 #ifdef FLOAT_STORE_FLAG_VALUE
9030 || (code == GE
9031 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9032 && (REAL_VALUE_NEGATIVE
9033 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9034 #endif
9036 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9037 && (((GET_MODE_CLASS (mode) == MODE_CC)
9038 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9039 || mode == VOIDmode || inner_mode == VOIDmode))
9042 /* We might have reversed a LT to get a GE here. But this wasn't
9043 actually the comparison of data, so we don't flag that we
9044 have had to reverse the condition. */
9045 did_reverse_condition ^= 1;
9046 reverse_code = 1;
9047 x = SET_SRC (set);
9049 else
9050 break;
9053 else if (reg_set_p (op0, prev))
9054 /* If this sets OP0, but not directly, we have to give up. */
9055 break;
9057 if (x)
9059 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9060 code = GET_CODE (x);
9061 if (reverse_code)
9063 code = reverse_condition (code);
9064 if (code == UNKNOWN)
9065 return 0;
9066 did_reverse_condition ^= 1;
9067 reverse_code = 0;
9070 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9071 if (earliest)
9072 *earliest = prev;
9076 /* If constant is first, put it last. */
9077 if (CONSTANT_P (op0))
9078 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9080 /* If OP0 is the result of a comparison, we weren't able to find what
9081 was really being compared, so fail. */
9082 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9083 return 0;
9085 /* Canonicalize any ordered integer comparison that involves equality
9086 (LE, GE, LEU, GEU) if we can do the computation in the relevant mode
9087 without overflowing. */
9089 if (GET_CODE (op1) == CONST_INT
9090 && GET_MODE (op0) != VOIDmode
9091 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9093 HOST_WIDE_INT const_val = INTVAL (op1);
9094 unsigned HOST_WIDE_INT uconst_val = const_val;
9095 unsigned HOST_WIDE_INT max_val
9096 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9098 switch (code)
9100 case LE:
9101 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9102 code = LT, op1 = GEN_INT (const_val + 1);
9103 break;
9105 /* When cross-compiling, const_val might be sign-extended from
9106 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9107 case GE:
9108 if ((HOST_WIDE_INT) (const_val & max_val)
9109 != (((HOST_WIDE_INT) 1
9110 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9111 code = GT, op1 = GEN_INT (const_val - 1);
9112 break;
9114 case LEU:
9115 if (uconst_val < max_val)
9116 code = LTU, op1 = GEN_INT (uconst_val + 1);
9117 break;
9119 case GEU:
9120 if (uconst_val != 0)
9121 code = GTU, op1 = GEN_INT (uconst_val - 1);
9122 break;
9124 default:
9125 break;
  /* If this was floating-point and we reversed anything other than an
     EQ or NE or (UN)ORDERED, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition
      && code != NE && code != EQ && code != UNORDERED && code != ORDERED
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  */

rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
}
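/* Illustrative sketch of the insns handled above (the register and
   label numbers are made up): a conditional branch has the form

     (set (pc) (if_then_else (lt (reg 65) (const_int 10))
			     (label_ref 23)
			     (pc)))

   and branches when the condition is true.  If the LABEL_REF appears
   as the else arm instead, REVERSE is set and canonicalize_condition
   inverts the comparison code.  */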
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (loop, x)
     const struct loop *loop;
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! loop_invariant_p (loop, XEXP (comparison, 0))
      || loop_invariant_p (loop, XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
			 XEXP (comparison, 1), XEXP (comparison, 0));
}
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */
static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}
/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (mem, data)
     rtx *mem;
     void *data ATTRIBUTE_UNUSED;
{
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CLOBBER:
      /* We're not interested in MEMs that are only clobbered.  */
      return -1;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    case EXPR_LIST:
      /* We're not interested in any MEMs that only appear in notes.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_mems_idx; ++i)
    if (rtx_equal_p (m, loop_mems[i].mem))
      {
	if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
	  /* The modes of the two memory accesses are different.  If
	     this happens, something tricky is going on, and we just
	     don't optimize accesses to this MEM.  */
	  loop_mems[i].optimize = 0;

	return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_mems_idx == loop_mems_allocated)
    {
      if (loop_mems_allocated != 0)
	loop_mems_allocated *= 2;
      else
	loop_mems_allocated = 32;

      loop_mems = (loop_mem_info *)
	xrealloc (loop_mems,
		  loop_mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_mems[loop_mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_mems[loop_mems_idx].reg = NULL_RTX;
  ++loop_mems_idx;

  return 0;
}
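/* Illustrative sketch (hypothetical source code, not from this file):
   in

     for (i = 0; i < n; i++)
       sum += *p;

   the (mem ...) for *p is recorded here during prescan_loop.  Whether
   it can really be shadowed in a pseudo is decided later by load_mems,
   which rejects volatile MEMs, MEMs whose address is not loop
   invariant, and MEMs aliased by stores within the loop.  */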
/* Like load_mems, but also ensures that SET_IN_LOOP,
   MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
   values after load_mems.  */

static void
load_mems_and_recount_loop_regs_set (loop, insn_count)
     const struct loop *loop;
     int *insn_count;
{
  int nregs = max_reg_num ();

  load_mems (loop);

  /* Recalculate set_in_loop and friends since load_mems may have
     created new registers.  */
  if (max_reg_num () > nregs)
    {
      int i;
      int old_nregs;

      old_nregs = nregs;
      nregs = max_reg_num ();

      if ((unsigned) nregs > set_in_loop->num_elements)
	{
	  /* Grow all the arrays.  */
	  VARRAY_GROW (set_in_loop, nregs);
	  VARRAY_GROW (n_times_set, nregs);
	  VARRAY_GROW (may_not_optimize, nregs);
	  VARRAY_GROW (reg_single_usage, nregs);
	}
      /* Clear the arrays.  */
      bzero ((char *) &set_in_loop->data, nregs * sizeof (int));
      bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
      bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));

      count_loop_regs_set (loop, may_not_optimize, reg_single_usage,
			   insn_count, nregs);

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  VARRAY_CHAR (may_not_optimize, i) = 1;
	  VARRAY_INT (set_in_loop, i) = 1;
	}

#ifdef AVOID_CCMODE_COPIES
      /* Don't try to move insns which set CC registers if we should not
	 create CCmode register copies.  */
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
	if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
	  VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

      /* Set n_times_set for the new registers.  */
      bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs),
	     (char *) (&n_times_set->data.i[0] + old_nregs),
	     (nregs - old_nregs) * sizeof (int));
    }
}
/* Move MEMs into registers for the duration of the loop.  */

static void
load_mems (loop)
     const struct loop *loop;
{
  int maybe_never = 0;
  int i;
  rtx p;
  rtx label = NULL_RTX;
  rtx end_label = NULL_RTX;
  /* Nonzero if the next instruction may never be executed.  */
  int next_maybe_never = 0;
  int last_max_reg = max_reg_num ();

  if (loop_mems_idx == 0)
    return;

  /* Find start of the extended basic block that enters the loop.  */
  for (p = loop->start;
       PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
       p = PREV_INSN (p))
    ;

  cselib_init ();

  /* Build table of mems that get set to constant values before the
     loop.  */
  for (; p != loop->start; p = NEXT_INSN (p))
    cselib_process_insn (p);

  /* Check to see if it's possible that some instructions in the
     loop are never executed.  */
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX && ! maybe_never;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == CODE_LABEL)
	maybe_never = 1;
      else if (GET_CODE (p) == JUMP_INSN
	       /* If we enter the loop in the middle, and scan
		  around to the beginning, don't set maybe_never
		  for that.  This must be an unconditional jump,
		  otherwise the code at the top of the loop might
		  never be executed.  Unconditional jumps are
		  followed by a barrier, then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN
		     && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop->end
		     && any_uncondjump_p (p)))
	{
	  if (!any_condjump_p (p))
	    /* Something complicated.  */
	    maybe_never = 1;
	  else
	    /* If there are any more instructions in the loop, they
	       might not be reached.  */
	    next_maybe_never = 1;
	}
      else if (next_maybe_never)
	maybe_never = 1;
    }
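  /* Illustrative sketch (hypothetical source code): given a loop body
     containing

	 if (cond)
	   *p = *p + 1;

     the insns guarded by the branch may never execute, so MAYBE_NEVER
     is set and a MEM that may trap (say, through a possibly null
     pointer) will not be loaded in front of the loop below.  */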
  /* Actually move the MEMs.  */
  for (i = 0; i < loop_mems_idx; ++i)
    {
      regset_head load_copies;
      regset_head store_copies;
      int written = 0;
      rtx reg;
      rtx mem = loop_mems[i].mem;
      rtx mem_list_entry;

      if (MEM_VOLATILE_P (mem)
	  || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
	/* There's no telling whether or not MEM is modified.  */
	loop_mems[i].optimize = 0;

      /* Go through the MEMs written to in the loop to see if this
	 one is aliased by one of them.  */
      mem_list_entry = loop_store_mems;
      while (mem_list_entry)
	{
	  if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
	    written = 1;
	  else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
				    mem, rtx_varies_p))
	    {
	      /* MEM is indeed aliased by this store.  */
	      loop_mems[i].optimize = 0;
	      break;
	    }
	  mem_list_entry = XEXP (mem_list_entry, 1);
	}

      if (flag_float_store && written
	  && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
	loop_mems[i].optimize = 0;

      /* If this MEM is written to, we must be sure that there
	 are no reads from another MEM that aliases this one.  */
      if (loop_mems[i].optimize && written)
	{
	  int j;

	  for (j = 0; j < loop_mems_idx; ++j)
	    {
	      if (j == i)
		continue;
	      else if (true_dependence (mem,
					VOIDmode,
					loop_mems[j].mem,
					rtx_varies_p))
		{
		  /* It's not safe to hoist loop_mems[i] out of
		     the loop because writes to it might not be
		     seen by reads from loop_mems[j].  */
		  loop_mems[i].optimize = 0;
		  break;
		}
	    }
	}

      if (maybe_never && may_trap_p (mem))
	/* We can't access the MEM outside the loop; it might
	   cause a trap that wouldn't have happened otherwise.  */
	loop_mems[i].optimize = 0;

      if (!loop_mems[i].optimize)
	/* We thought we were going to lift this MEM out of the
	   loop, but later discovered that we could not.  */
	continue;
      INIT_REG_SET (&load_copies);
      INIT_REG_SET (&store_copies);

      /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
	 order to keep scan_loop from moving stores to this MEM
	 out of the loop just because this REG is neither a
	 user-variable nor used in the loop test.  */
      reg = gen_reg_rtx (GET_MODE (mem));
      REG_USERVAR_P (reg) = 1;
      loop_mems[i].reg = reg;

      /* Now, replace all references to the MEM with the
	 corresponding pseudos.  */
      maybe_never = 0;
      for (p = next_insn_in_loop (loop, loop->scan_start);
	   p != NULL_RTX;
	   p = next_insn_in_loop (loop, p))
	{
	  rtx_and_int ri;

	  if (INSN_P (p))
	    {
	      rtx set;

	      set = single_set (p);

	      /* See if this copies the mem into a register that isn't
		 modified afterwards.  We'll try to do copy propagation
		 a little further on.  */
	      if (set
		  /* @@@ This test is _way_ too conservative.  */
		  && ! maybe_never
		  && GET_CODE (SET_DEST (set)) == REG
		  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
		  && REGNO (SET_DEST (set)) < last_max_reg
		  && VARRAY_INT (n_times_set, REGNO (SET_DEST (set))) == 1
		  && rtx_equal_p (SET_SRC (set), mem))
		SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));

	      /* See if this copies the mem from a register that isn't
		 modified afterwards.  We'll try to remove the
		 redundant copy later on by doing a little register
		 renaming and copy propagation.  This will help
		 to untangle things for the BIV detection code.  */
	      if (set
		  && ! maybe_never
		  && GET_CODE (SET_SRC (set)) == REG
		  && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
		  && REGNO (SET_SRC (set)) < last_max_reg
		  && VARRAY_INT (n_times_set, REGNO (SET_SRC (set))) == 1
		  && rtx_equal_p (SET_DEST (set), mem))
		SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));

	      /* Replace the memory reference with the shadow register.  */
	      ri.r = p;
	      ri.i = i;
	      for_each_rtx (&p, replace_loop_mem, &ri);
	    }

	  if (GET_CODE (p) == CODE_LABEL
	      || GET_CODE (p) == JUMP_INSN)
	    maybe_never = 1;
	}
      if (! apply_change_group ())
	/* We couldn't replace all occurrences of the MEM.  */
	loop_mems[i].optimize = 0;
      else
	{
	  /* Load the memory immediately before LOOP->START, which is
	     the NOTE_LOOP_BEG.  */
	  cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
	  rtx set;
	  rtx best = mem;
	  int j;
	  struct elt_loc_list *const_equiv = 0;

	  if (e)
	    {
	      struct elt_loc_list *equiv;
	      struct elt_loc_list *best_equiv = 0;
	      for (equiv = e->locs; equiv; equiv = equiv->next)
		{
		  if (CONSTANT_P (equiv->loc))
		    const_equiv = equiv;
		  else if (GET_CODE (equiv->loc) == REG
			   /* Extending hard register lifetimes causes
			      a crash on SRC targets.  Doing so on
			      non-SRC targets is probably also not a
			      good idea, since we most probably have a
			      pseudoregister equivalence as well.  */
			   && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
		    best_equiv = equiv;
		}
	      /* Use the constant equivalence if that is cheap enough.  */
	      if (! best_equiv)
		best_equiv = const_equiv;
	      else if (const_equiv
		       && (rtx_cost (const_equiv->loc, SET)
			   <= rtx_cost (best_equiv->loc, SET)))
		{
		  best_equiv = const_equiv;
		  const_equiv = 0;
		}

	      /* If best_equiv is nonzero, we know that MEM is set to a
		 constant or register before the loop.  We will use this
		 knowledge to initialize the shadow register with that
		 constant or reg rather than by loading from MEM.  */
	      if (best_equiv)
		best = copy_rtx (best_equiv->loc);
	    }
	  set = gen_move_insn (reg, best);
	  set = emit_insn_before (set, loop->start);
	  if (const_equiv)
	    REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
						 copy_rtx (const_equiv->loc),
						 REG_NOTES (set));

	  if (written)
	    {
	      if (label == NULL_RTX)
		{
		  /* We must compute the former
		     right-after-the-end label before we insert
		     the new one.  */
		  end_label = next_label (loop->end);
		  label = gen_label_rtx ();
		  emit_label_after (label, loop->end);
		}

	      /* Store the memory immediately after END, which is
		 the NOTE_LOOP_END.  */
	      set = gen_move_insn (copy_rtx (mem), reg);
	      emit_insn_after (set, label);
	    }

	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
		       REGNO (reg), (written ? "r/w" : "r/o"));
	      print_rtl (loop_dump_stream, mem);
	      fputc ('\n', loop_dump_stream);
	    }

	  /* Attempt a bit of copy propagation.  This helps untangle the
	     data flow, and enables {basic,general}_induction_var to find
	     more bivs/givs.  */
	  EXECUTE_IF_SET_IN_REG_SET
	    (&load_copies, FIRST_PSEUDO_REGISTER, j,
	     {
	       try_copy_prop (loop, reg, j);
	     });
	  CLEAR_REG_SET (&load_copies);

	  EXECUTE_IF_SET_IN_REG_SET
	    (&store_copies, FIRST_PSEUDO_REGISTER, j,
	     {
	       try_swap_copy_prop (loop, reg, j);
	     });
	  CLEAR_REG_SET (&store_copies);
	}
    }
  if (label != NULL_RTX)
    {
      /* Now, we need to replace all references to the previous exit
	 label with the new one.  */
      rtx_pair rr;
      rr.r1 = end_label;
      rr.r2 = label;

      for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
	{
	  for_each_rtx (&p, replace_label, &rr);

	  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
	     field.  This is not handled by for_each_rtx because it doesn't
	     handle unprinted ('0') fields.  We need to update JUMP_LABEL
	     because the immediately following unroll pass will use it.
	     replace_label would not work anyway, because that only handles
	     LABEL_REFs.  */
	  if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
	    JUMP_LABEL (p) = label;
	}
    }

  cselib_finish ();
}
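/* Illustrative sketch of the overall transformation (the pseudo number
   is made up): a read/write MEM X whose shadow register is (reg 70)
   ends up as

     before the loop:  (set (reg 70) (mem X))  [or a cselib equivalence]
     in the loop:      each (mem X) is replaced by (reg 70)
     after the loop:   (set (mem X) (reg 70))

   with the final store emitted after a fresh label that takes over all
   uses of the old exit label, so every exit path writes X back.  */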
/* For communication between note_reg_stored and its caller.  */
struct note_reg_stored_arg
{
  int set_seen;
  rtx reg;
};

/* Called via note_stores, record in SET_SEEN whether X, which is written,
   is equal to ARG.  */
static void
note_reg_stored (x, setter, arg)
     rtx x, setter ATTRIBUTE_UNUSED;
     void *arg;
{
  struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
  if (t->reg == x)
    t->set_seen = 1;
}
/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
   There must be exactly one insn that sets this pseudo; it will be
   deleted if all replacements succeed and we can prove that the register
   is not used after the loop.  */

static void
try_copy_prop (loop, replacement, regno)
     const struct loop *loop;
     rtx replacement;
     unsigned int regno;
{
  /* This is the reg that we are copying from.  */
  rtx reg_rtx = regno_reg_rtx[regno];
  rtx init_insn = 0;
  rtx insn;
  /* These help keep track of whether we replaced all uses of the reg.  */
  int replaced_last = 0;
  int store_is_first = 0;

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      rtx set;

      /* Only substitute within one extended basic block from the initializing
	 insn.  */
      if (GET_CODE (insn) == CODE_LABEL && init_insn)
	break;

      if (! INSN_P (insn))
	continue;

      /* Is this the initializing insn?  */
      set = single_set (insn);
      if (set
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  if (init_insn)
	    abort ();

	  init_insn = insn;
	  if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
	    store_is_first = 1;
	}

      /* Only substitute after seeing the initializing insn.  */
      if (init_insn && insn != init_insn)
	{
	  struct note_reg_stored_arg arg;
	  rtx array[3];
	  array[0] = reg_rtx;
	  array[1] = replacement;
	  array[2] = insn;

	  for_each_rtx (&insn, replace_loop_reg, array);
	  if (REGNO_LAST_UID (regno) == INSN_UID (insn))
	    replaced_last = 1;

	  /* Stop replacing when REPLACEMENT is modified.  */
	  arg.reg = replacement;
	  arg.set_seen = 0;
	  note_stores (PATTERN (insn), note_reg_stored, &arg);
	  if (arg.set_seen)
	    break;
	}
    }
  if (! init_insn)
    abort ();
  if (apply_change_group ())
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "  Replaced reg %d", regno);
      if (store_is_first && replaced_last)
	{
	  PUT_CODE (init_insn, NOTE);
	  NOTE_LINE_NUMBER (init_insn) = NOTE_INSN_DELETED;
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, ", deleting init_insn (%d)",
		     INSN_UID (init_insn));
	}
      if (loop_dump_stream)
	fprintf (loop_dump_stream, ".\n");
    }
}
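/* Illustrative sketch (the register numbers are made up): given

     (set (reg 80) (reg 70))				<- init_insn
     (set (reg 90) (plus (reg 80) (const_int 4)))

   replacing pseudo 80 by pseudo 70 turns the second insn into
   (set (reg 90) (plus (reg 70) (const_int 4))); if the initializing
   copy was both the first and the last use of pseudo 80, it is
   converted into a NOTE_INSN_DELETED note.  */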
/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
   loop LOOP if the order of the sets of these registers can be
   swapped.  There must be exactly one insn within the loop that sets
   this pseudo followed immediately by a move insn that sets
   REPLACEMENT with REGNO.  */
static void
try_swap_copy_prop (loop, replacement, regno)
     const struct loop *loop;
     rtx replacement;
     unsigned int regno;
{
  rtx insn;
  rtx set;
  unsigned int new_regno;

  new_regno = REGNO (replacement);

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      /* Search for the insn that copies REGNO to NEW_REGNO.  */
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
	  && (set = single_set (insn))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == new_regno
	  && GET_CODE (SET_SRC (set)) == REG
	  && REGNO (SET_SRC (set)) == regno)
	break;
    }

  if (insn != NULL_RTX)
    {
      rtx prev_insn;
      rtx prev_set;

      /* Some DEF-USE info would come in handy here to make this
	 function more general.  For now, just check the previous insn
	 which is the most likely candidate for setting REGNO.  */

      prev_insn = PREV_INSN (insn);

      if (GET_RTX_CLASS (GET_CODE (prev_insn)) == 'i'
	  && (prev_set = single_set (prev_insn))
	  && GET_CODE (SET_DEST (prev_set)) == REG
	  && REGNO (SET_DEST (prev_set)) == regno)
	{
	  /* We have:
	     (set (reg regno) (expr))
	     (set (reg new_regno) (reg regno))

	     so try converting this to:
	     (set (reg new_regno) (expr))
	     (set (reg regno) (reg new_regno))

	     The former construct is often generated when a global
	     variable used for an induction variable is shadowed by a
	     register (NEW_REGNO).  The latter construct improves the
	     chances of GIV replacement and BIV elimination.  */

	  validate_change (prev_insn, &SET_DEST (prev_set),
			   replacement, 1);
	  validate_change (insn, &SET_DEST (set),
			   SET_SRC (set), 1);
	  validate_change (insn, &SET_SRC (set),
			   replacement, 1);

	  if (apply_change_group ())
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "  Swapped set of reg %d at %d with reg %d at %d.\n",
			 regno, INSN_UID (insn),
			 new_regno, INSN_UID (prev_insn));

	      /* Update first use of REGNO.  */
	      if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
		REGNO_FIRST_UID (regno) = INSN_UID (insn);

	      /* Now perform copy propagation to hopefully
		 remove all uses of REGNO within the loop.  */
	      try_copy_prop (loop, replacement, regno);
	    }
	}
    }
}
/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually an
   rtx_and_int * describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  rtx_and_int *ri;
  rtx insn;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  ri = (rtx_and_int *) data;
  i = ri->i;

  if (!rtx_equal_p (loop_mems[i].mem, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  insn = ri->r;

  /* Actually replace the MEM.  */
  validate_change (insn, mem, loop_mems[i].reg, 1);

  return 0;
}
/* Replace one register with another.  Called through for_each_rtx; PX points
   to the rtx being scanned.  DATA is actually an array of three rtx's; the
   first one is the one to be replaced, and the second one the replacement.
   The third one is the current insn.  */

static int
replace_loop_reg (px, data)
     rtx *px;
     void *data;
{
  rtx x = *px;
  rtx *array = (rtx *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == array[0])
    validate_change (array[2], px, array[1], 1);

  return 0;
}
/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}
#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (GET_CODE (INSN) == NOTE \
	   ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
	   : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
static void
loop_dump_aux (loop, file, verbose)
     const struct loop *loop;
     FILE *file;
     int verbose;
{
  rtx label;

  if (! loop || ! file)
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (loop->first->head)
      || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
      || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
      != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
	     INSN_UID (PREV_INSN (loop->first->head)));
  if (! NEXT_INSN (loop->last->end)
      || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
      || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
      != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
	     INSN_UID (NEXT_INSN (loop->last->end)));

  if (loop->start)
    {
      fprintf (file,
	       ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->start),
	       LOOP_INSN_UID (loop->start),
	       LOOP_BLOCK_NUM (loop->cont),
	       LOOP_INSN_UID (loop->cont),
	       LOOP_BLOCK_NUM (loop->cont),
	       LOOP_INSN_UID (loop->cont),
	       LOOP_BLOCK_NUM (loop->vtop),
	       LOOP_INSN_UID (loop->vtop),
	       LOOP_BLOCK_NUM (loop->end),
	       LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->top),
	       LOOP_INSN_UID (loop->top),
	       LOOP_BLOCK_NUM (loop->scan_start),
	       LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
	{
	  fputs (", labels:", file);
	  for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
	    {
	      fprintf (file, " %d ",
		       LOOP_INSN_UID (XEXP (label, 0)));
	    }
	}
      fputs ("\n", file);

      /* This can happen when a marked loop appears as two nested loops,
	 say from while (a || b) {}.  The inner loop won't match
	 the loop markers but the outer one will.  */
      if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
	fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
    }
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (loop)
     const struct loop *loop;
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}