/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"

/* Information about the loop being processed used to compute
   the number of loop iterations for loop unrolling and doloop
   optimization.  */
static struct loop_info this_loop_info;

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
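
/* For instance, once luids are assigned, whether insn A precedes insn B
   in the insn stream can be tested with a simple comparison; a hedged
   sketch (INSN_LUID here is the uid_luid lookup used throughout this
   file, e.g. in INSN_IN_RANGE_P below):

     if (INSN_LUID (a) < INSN_LUID (b))
       ... A comes before B ...

   Plain uids cannot be compared this way, since insns created late
   (e.g. by loop itself) receive uids out of stream order.  */
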
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* Likewise for the continue insn.  */
static rtx *loop_number_loop_cont;

/* The first code_label that is reached in every loop iteration.
   0 when not computed yet, initially const0_rtx if a jump couldn't be
   followed.
   Also set to 0 when there is no such label before the NOTE_INSN_LOOP_CONT
   of this loop, or in verify_dominator, if a jump couldn't be followed.  */
static rtx *loop_number_cont_dominator;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAVE_decrement_and_branch_on_count
/* Records whether the count register is in use by an inner loop.  */

int *loop_used_count_register;
#endif  /* HAVE_decrement_and_branch_on_count */

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include return
   instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */
static varray_type set_in_loop;

/* Original value of set_in_loop; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */
static varray_type n_times_set;

/* Indexed by register number; 1 indicates that the register
   cannot be moved or strength reduced.  */
static varray_type may_not_optimize;

/* Contains the insn in which a register was used if it was used
   exactly once; contains const0_rtx if it was used more than once.  */
static varray_type reg_single_usage;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* List of MEMs that are stored in this loop.  */

static rtx loop_store_mems;

/* The insn where the first of these was found.  */
static rtx first_loop_store_insn;

typedef struct loop_mem_info {
  rtx mem;      /* The MEM itself.  */
  rtx reg;      /* Corresponding pseudo, if any.  */
  int optimize; /* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMs.  */

static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current
   loop.  This happens if the loop contains a call (in which case
   `loop_info->has_call' will also be set) or if we store into more
   than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when clearing
                                   the rest of the reg.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};

static struct movable *the_movables;

FILE *loop_dump_stream;

/* Forward declarations.  */

static void verify_dominator PROTO((int));
static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx, struct loop_info *));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_one_set PROTO((rtx, rtx, varray_type, rtx *));

static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
                                       int *, int));
static void note_addr_stored PROTO((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PROTO((rtx, rtx, void *));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx,
                                   struct loop_info *, rtx, int, int));
static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx *, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx,
                                     unsigned HOST_WIDE_INT));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx, struct loop_info *));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
struct recombine_givs_stats;
static int find_life_end PROTO((rtx, struct recombine_givs_stats *, rtx, rtx));
static void recombine_givs PROTO((struct iv_class *, rtx, rtx, int));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx, void *));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
                                                       rtx, int *));
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));

typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)       \
  (INSN_UID (INSN) < max_uid_for_loop           \
   && INSN_LUID (INSN) >= INSN_LUID (START)     \
   && INSN_LUID (INSN) <= INSN_LUID (END))
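
/* A minimal usage sketch (names hypothetical): given the NOTE_INSN_LOOP_BEG
   and NOTE_INSN_LOOP_END of a loop, insn membership is tested by luid:

     if (INSN_IN_RANGE_P (jump_target, loop_start, loop_end))
       ... the target lies within the loop ...

   The INSN_UID bound must be checked first: insns created after the luid
   table was built (uid >= max_uid_for_loop) have no valid luid to compare,
   so the macro conservatively rejects them.  */
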
#ifdef HAVE_decrement_and_branch_on_count
/* Test whether BCT applicable and safe.  */
static void insert_bct PROTO((rtx, rtx, struct loop_info *));

/* Auxiliary function that inserts the BCT pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

static int compute_luids PROTO((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PROTO((struct induction *,
                                                   struct induction *, rtx));

/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;
#if 0
static int shift_cost;
static int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
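
/* Usage note: loop_optimize below renumbers the entire function with
   compute_luids (f, NULL_RTX, 0).  A hedged sketch of numbering two
   consecutive regions in one monotonic sequence (region boundaries
   hypothetical):

     int next = compute_luids (region_start, region_end, 0);
     next = compute_luids (region_end, NULL_RTX, next - 1);

   PREV_LUID is the luid just before the region, and the return value is
   one past the last luid assigned, so chaining it this way keeps luids
   strictly increasing across regions.  */
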
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p, bct_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p, bct_p;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_cont = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_cont_dominator = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

#ifdef HAVE_decrement_and_branch_on_count
  /* Allocate for BCT optimization.  */
  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
#endif  /* HAVE_decrement_and_branch_on_count */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it might
     have rearranged code afterwards, so we need to recompute the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
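
  /* Illustrative example (values hypothetical): if luids came out as
     {0, 0, 3, 0, 0, 7, ...}, the first pass copies the first nonzero luid
     (3) into slot 0, and the second pass smears each nonzero value forward,
     yielding {3, 3, 3, 3, 3, 7, ...}.  A deleted insn's uid thus maps to
     the luid of the nearest surviving predecessor rather than to 0.  */
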
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 loop_number_loop_cont[i], unroll_p, bct_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (insn, start, end, loop_top)
     rtx insn;
     rtx start;
     rtx end;
     rtx loop_top;
{
  insn = NEXT_INSN (insn);

  if (insn == end)
    {
      if (loop_top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop_top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
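
/* A hedged sketch of the intended traversal: for a rotated loop laid out
   as BEG, jump-to-test, TOP, body, test, END, the scan below starts at the
   true entry (the test label) and uses this function to wrap around:

     for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
          p != NULL_RTX;
          p = next_insn_in_loop (p, scan_start, end, loop_top))
       ...

   so insns are visited in execution order even though the entry point is
   near the bottom of the insn stream.  */
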
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.
   LOOP_CONT is the NOTE_INSN_LOOP_CONT.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, loop_cont, unroll_p, bct_p)
     rtx loop_start, end, loop_cont;
     int unroll_p, bct_p;
{
  register int i;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;
  struct loop_info *loop_info = &this_loop_info;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end, loop_info);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
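
  /* For instance, on a target with 30 non-fixed registers (the count is
     hypothetical), a loop without calls gets threshold = 2 * 31 = 62,
     while a loop containing calls gets 1 * 31 = 31: with calls, registers
     are scarcer, so extending a register's life to save one insn is only
     justified over a shorter span.  */
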
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  Set VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_and_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
  VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (set_in_loop, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

  bcopy ((char *) &set_in_loop->data,
         (char *) &n_times_set->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_info->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_info->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set set_in_loop negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          else if (/* The register is used in basic blocks other
                      than the one where it is set (meaning that
                      something after this point in the loop might
                      depend on its value before the set).  */
                   ! reg_in_basic_block_p (p, SET_DEST (set))
                   /* And the set is not guaranteed to be executed once
                      the loop starts, or the value before the set is
                      needed before the set occurs...

                      ??? Note we have quadratic behaviour here, mitigated
                      by the fact that the previous test will often fail for
                      large loops.  Rather than re-scanning the entire loop
                      each time for register usage, we should build tables
                      of the register usage and use them here instead.  */
                   && (maybe_never
                       || loop_reg_used_before_p (set, p, loop_start,
                                                  scan_start, end)))
            /* It is unsafe to move the set.

               This code used to consider it OK to move a set of a variable
               which was not created by the user and not used in an exit test.
               That behavior is incorrect and was removed.  */
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (VARRAY_INT (set_in_loop,
                                   REGNO (SET_DEST (set))) == 1
                       || (tem1
                           = consec_sets_invariant_p
                           (SET_DEST (set),
                            VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
                            p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (loop_info->has_call
                  && VARRAY_RTX (reg_single_usage, regno) != 0
                  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
                  && VARRAY_INT (set_in_loop, regno) == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           VARRAY_RTX
                                           (reg_single_usage, regno))
                  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           VARRAY_RTX
                                           (reg_single_usage, regno)))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
                    = replace_rtx (REG_NOTES (VARRAY_RTX
                                              (reg_single_usage, regno)),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  VARRAY_INT (set_in_loop, regno) = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = VARRAY_INT (set_in_loop,
                                      REGNO (SET_DEST (set))) - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = VARRAY_INT (n_times_set, regno);
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (VARRAY_INT (set_in_loop, regno) == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  VARRAY_INT (set_in_loop, regno) = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier and then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in set_in_loop for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change set_in_loop to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (set_in_loop, i) < 0)
      VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
                                       loop_start, &insn_count);

  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
                       insn_count, loop_start, end,
                       loop_info, loop_cont, unroll_p, bct_p);
    }

  VARRAY_FREE (reg_single_usage);
  VARRAY_FREE (set_in_loop);
  VARRAY_FREE (n_times_set);
  VARRAY_FREE (may_not_optimize);
}

/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}

/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}

/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}

/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}

/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1 && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_set, m1->regno) == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap: ;
          }
    }
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2
           && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

1627 /* Compare the elements. If any pair of corresponding elements
1628 fail to match, return 0 for the whole things. */
1630 fmt = GET_RTX_FORMAT (code);
1631 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1633 switch (fmt[i])
1635 case 'w':
1636 if (XWINT (x, i) != XWINT (y, i))
1637 return 0;
1638 break;
1640 case 'i':
1641 if (XINT (x, i) != XINT (y, i))
1642 return 0;
1643 break;
1645 case 'E':
1646 /* Two vectors must have the same length. */
1647 if (XVECLEN (x, i) != XVECLEN (y, i))
1648 return 0;
1650 /* And the corresponding elements must match. */
1651 for (j = 0; j < XVECLEN (x, i); j++)
1652 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1653 return 0;
1654 break;
1656 case 'e':
1657 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1658 return 0;
1659 break;
1661 case 's':
1662 if (strcmp (XSTR (x, i), XSTR (y, i)))
1663 return 0;
1664 break;
1666 case 'u':
1667 /* These are just backpointers, so they don't matter. */
1668 break;
1670 case '0':
1671 break;
1673 /* It is believed that rtx's at this level will never
1674 contain anything but integers and other rtx's,
1675 except within LABEL_REFs and SYMBOL_REFs. */
1676 default:
1677 abort ();
1680 return 1;
1683 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1684 insns in INSNS which use the reference. */
1686 static void
1687 add_label_notes (x, insns)
1688 rtx x;
1689 rtx insns;
1691 enum rtx_code code = GET_CODE (x);
1692 int i, j;
1693 const char *fmt;
1694 rtx insn;
1696 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1698 /* This code used to ignore labels that referred to dispatch tables to
1699 avoid flow generating (slightly) worse code.
1701 We no longer ignore such label references (see LABEL_REF handling in
1702 mark_jump_label for additional information). */
1703 for (insn = insns; insn; insn = NEXT_INSN (insn))
1704 if (reg_mentioned_p (XEXP (x, 0), insn))
1705 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1706 REG_NOTES (insn));
1709 fmt = GET_RTX_FORMAT (code);
1710 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1712 if (fmt[i] == 'e')
1713 add_label_notes (XEXP (x, i), insns);
1714 else if (fmt[i] == 'E')
1715 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1716 add_label_notes (XVECEXP (x, i, j), insns);
1720 /* Scan MOVABLES, and move the insns that deserve to be moved.
1721 If two matching movables are combined, replace one reg with the
1722 other throughout. */
1724 static void
1725 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1726 struct movable *movables;
1727 int threshold;
1728 int insn_count;
1729 rtx loop_start;
1730 rtx end;
1731 int nregs;
1733 rtx new_start = 0;
1734 register struct movable *m;
1735 register rtx p;
1736 /* Map of pseudo-register replacements to handle combining
1737 when we move several insns that load the same value
1738 into different pseudo-registers. */
1739 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1740 char *already_moved = (char *) alloca (nregs);
1742 bzero (already_moved, nregs);
1743 bzero ((char *) reg_map, nregs * sizeof (rtx));
1745 num_movables = 0;
1747 for (m = movables; m; m = m->next)
1749 /* Describe this movable insn. */
1751 if (loop_dump_stream)
1753 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1754 INSN_UID (m->insn), m->regno, m->lifetime);
1755 if (m->consec > 0)
1756 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1757 if (m->cond)
1758 fprintf (loop_dump_stream, "cond ");
1759 if (m->force)
1760 fprintf (loop_dump_stream, "force ");
1761 if (m->global)
1762 fprintf (loop_dump_stream, "global ");
1763 if (m->done)
1764 fprintf (loop_dump_stream, "done ");
1765 if (m->move_insn)
1766 fprintf (loop_dump_stream, "move-insn ");
1767 if (m->match)
1768 fprintf (loop_dump_stream, "matches %d ",
1769 INSN_UID (m->match->insn));
1770 if (m->forces)
1771 fprintf (loop_dump_stream, "forces %d ",
1772 INSN_UID (m->forces->insn));
1775 /* Count movables. Value used in heuristics in strength_reduce. */
1776 num_movables++;
1778 /* Ignore the insn if it's already done (it matched something else).
1779 Otherwise, see if it is now safe to move. */
1781 if (!m->done
1782 && (! m->cond
1783 || (1 == invariant_p (m->set_src)
1784 && (m->dependencies == 0
1785 || 1 == invariant_p (m->dependencies))
1786 && (m->consec == 0
1787 || 1 == consec_sets_invariant_p (m->set_dest,
1788 m->consec + 1,
1789 m->insn))))
1790 && (! m->forces || m->forces->done))
1792 register int regno;
1793 register rtx p;
1794 int savings = m->savings;
1796 /* We have an insn that is safe to move.
1797 Compute its desirability. */
1799 p = m->insn;
1800 regno = m->regno;
1802 if (loop_dump_stream)
1803 fprintf (loop_dump_stream, "savings %d ", savings);
1805 if (moved_once[regno] && loop_dump_stream)
1806 fprintf (loop_dump_stream, "halved since already moved ");
1808 /* An insn MUST be moved if we already moved something else
1809 which is safe only if this one is moved too: that is,
1810 if already_moved[REGNO] is nonzero. */
1812 /* An insn is desirable to move if the new lifetime of the
1813 register is no more than THRESHOLD times the old lifetime.
1814 If it's not desirable, it means the loop is so big
1815 that moving won't speed things up much,
1816 and it is liable to make register usage worse. */
1818 /* It is also desirable to move if it can be moved at no
1819 extra cost because something else was already moved. */
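/* Worked example for the test below, with made-up numbers: if
   threshold == 6, savings == 2 and m->lifetime == 10, the product
   is 120, so the insn is moved when the loop contains at most 120
   insns.  If the reg was already moved out of another loop
   (moved_once), the product is compared against insn_count * 2,
   effectively halving the threshold; hence the "halved since
   already moved" dump message above.  */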
1821 if (already_moved[regno]
1822 || flag_move_all_movables
1823 || (threshold * savings * m->lifetime) >=
1824 (moved_once[regno] ? insn_count * 2 : insn_count)
1825 || (m->forces && m->forces->done
1826 && VARRAY_INT (n_times_set, m->forces->regno) == 1))
1828 int count;
1829 register struct movable *m1;
1830 rtx first = NULL_RTX;
1832 /* Now move the insns that set the reg. */
1834 if (m->partial && m->match)
1836 rtx newpat, i1;
1837 rtx r1, r2;
1838 /* Find the end of this chain of matching regs.
1839 Thus, we load each reg in the chain from that one reg.
1840 And that reg is loaded with 0 directly,
1841 since it has ->match == 0. */
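/* E.g., if A->match == B, B->match == C and C->match == 0, the scan
   below stops at C; A's register is then loaded from C's register
   before the loop, and only C itself is the reg that gets zeroed
   directly.  (A, B and C are illustrative movables.)  */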
1842 for (m1 = m; m1->match; m1 = m1->match);
1843 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1844 SET_DEST (PATTERN (m1->insn)));
1845 i1 = emit_insn_before (newpat, loop_start);
1847 /* Mark the moved, invariant reg as being allowed to
1848 share a hard reg with the other matching invariant. */
1849 REG_NOTES (i1) = REG_NOTES (m->insn);
1850 r1 = SET_DEST (PATTERN (m->insn));
1851 r2 = SET_DEST (PATTERN (m1->insn));
1852 regs_may_share
1853 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1854 gen_rtx_EXPR_LIST (VOIDmode, r2,
1855 regs_may_share));
1856 delete_insn (m->insn);
1858 if (new_start == 0)
1859 new_start = i1;
1861 if (loop_dump_stream)
1862 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1864 /* If we are to re-generate the item being moved with a
1865 new move insn, first delete what we have and then emit
1866 the move insn before the loop. */
1867 else if (m->move_insn)
1869 rtx i1, temp;
1871 for (count = m->consec; count >= 0; count--)
1873 /* If this is the first insn of a library call sequence,
1874 skip to the end. */
1875 if (GET_CODE (p) != NOTE
1876 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1877 p = XEXP (temp, 0);
1879 /* If this is the last insn of a libcall sequence, then
1880 delete every insn in the sequence except the last.
1881 The last insn is handled in the normal manner. */
1882 if (GET_CODE (p) != NOTE
1883 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1885 temp = XEXP (temp, 0);
1886 while (temp != p)
1887 temp = delete_insn (temp);
1890 temp = p;
1891 p = delete_insn (p);
1893 /* simplify_giv_expr expects that it can walk the insns
1894 at m->insn forwards and see this old sequence we are
1895 tossing here. delete_insn does preserve the next
1896 pointers, but when we skip over a NOTE we must fix
1897 it up. Otherwise that code walks into the non-deleted
1898 insn stream. */
1899 while (p && GET_CODE (p) == NOTE)
1900 p = NEXT_INSN (temp) = NEXT_INSN (p);
1903 start_sequence ();
1904 emit_move_insn (m->set_dest, m->set_src);
1905 temp = get_insns ();
1906 end_sequence ();
1908 add_label_notes (m->set_src, temp);
1910 i1 = emit_insns_before (temp, loop_start);
1911 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1912 REG_NOTES (i1)
1913 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1914 m->set_src, REG_NOTES (i1));
1916 if (loop_dump_stream)
1917 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1919 /* The more regs we move, the less we like moving them. */
1920 threshold -= 3;
1922 else
1924 for (count = m->consec; count >= 0; count--)
1926 rtx i1, temp;
1928 /* If first insn of libcall sequence, skip to end. */
1929 /* Do this at start of loop, since p is guaranteed to
1930 be an insn here. */
1931 if (GET_CODE (p) != NOTE
1932 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1933 p = XEXP (temp, 0);
1935 /* If last insn of libcall sequence, move all
1936 insns except the last before the loop. The last
1937 insn is handled in the normal manner. */
1938 if (GET_CODE (p) != NOTE
1939 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1941 rtx fn_address = 0;
1942 rtx fn_reg = 0;
1943 rtx fn_address_insn = 0;
1945 first = 0;
1946 for (temp = XEXP (temp, 0); temp != p;
1947 temp = NEXT_INSN (temp))
1949 rtx body;
1950 rtx n;
1951 rtx next;
1953 if (GET_CODE (temp) == NOTE)
1954 continue;
1956 body = PATTERN (temp);
1958 /* Find the next insn after TEMP,
1959 not counting USE or NOTE insns. */
1960 for (next = NEXT_INSN (temp); next != p;
1961 next = NEXT_INSN (next))
1962 if (! (GET_CODE (next) == INSN
1963 && GET_CODE (PATTERN (next)) == USE)
1964 && GET_CODE (next) != NOTE)
1965 break;
1967 /* If that is the call, this may be the insn
1968 that loads the function address.
1970 Extract the function address from the insn
1971 that loads it into a register.
1972 If this insn was cse'd, we get incorrect code.
1974 So emit a new move insn that copies the
1975 function address into the register that the
1976 call insn will use. flow.c will delete any
1977 redundant stores that we have created. */
1978 if (GET_CODE (next) == CALL_INSN
1979 && GET_CODE (body) == SET
1980 && GET_CODE (SET_DEST (body)) == REG
1981 && (n = find_reg_note (temp, REG_EQUAL,
1982 NULL_RTX)))
1984 fn_reg = SET_SRC (body);
1985 if (GET_CODE (fn_reg) != REG)
1986 fn_reg = SET_DEST (body);
1987 fn_address = XEXP (n, 0);
1988 fn_address_insn = temp;
1990 /* We have the call insn.
1991 If it uses the register we suspect it might,
1992 load it with the correct address directly. */
1993 if (GET_CODE (temp) == CALL_INSN
1994 && fn_address != 0
1995 && reg_referenced_p (fn_reg, body))
1996 emit_insn_after (gen_move_insn (fn_reg,
1997 fn_address),
1998 fn_address_insn);
2000 if (GET_CODE (temp) == CALL_INSN)
2002 i1 = emit_call_insn_before (body, loop_start);
2003 /* Because the USAGE information potentially
2004 contains objects other than hard registers
2005 we need to copy it. */
2006 if (CALL_INSN_FUNCTION_USAGE (temp))
2007 CALL_INSN_FUNCTION_USAGE (i1)
2008 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2010 else
2011 i1 = emit_insn_before (body, loop_start);
2012 if (first == 0)
2013 first = i1;
2014 if (temp == fn_address_insn)
2015 fn_address_insn = i1;
2016 REG_NOTES (i1) = REG_NOTES (temp);
2017 delete_insn (temp);
2019 if (new_start == 0)
2020 new_start = first;
2022 if (m->savemode != VOIDmode)
2024 /* P sets REG to zero; but we should clear only
2025 the bits that are not covered by the mode
2026 m->savemode. */
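/* E.g., with m->savemode == QImode (8 bits on typical targets), the
   mask computed below is (1 << 8) - 1 == 0xff, so the AND clears
   bits 8 and up (exactly the bits not covered by m->savemode) while
   the low byte of REG is left untouched.  */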
2027 rtx reg = m->set_dest;
2028 rtx sequence;
2029 rtx tem;
2031 start_sequence ();
2032 tem = expand_binop
2033 (GET_MODE (reg), and_optab, reg,
2034 GEN_INT ((((HOST_WIDE_INT) 1
2035 << GET_MODE_BITSIZE (m->savemode)))
2036 - 1),
2037 reg, 1, OPTAB_LIB_WIDEN);
2038 if (tem == 0)
2039 abort ();
2040 if (tem != reg)
2041 emit_move_insn (reg, tem);
2042 sequence = gen_sequence ();
2043 end_sequence ();
2044 i1 = emit_insn_before (sequence, loop_start);
2046 else if (GET_CODE (p) == CALL_INSN)
2048 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2049 /* Because the USAGE information potentially
2050 contains objects other than hard registers
2051 we need to copy it. */
2052 if (CALL_INSN_FUNCTION_USAGE (p))
2053 CALL_INSN_FUNCTION_USAGE (i1)
2054 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2056 else if (count == m->consec && m->move_insn_first)
2058 /* The SET_SRC might not be invariant, so we must
2059 use the REG_EQUAL note. */
2060 start_sequence ();
2061 emit_move_insn (m->set_dest, m->set_src);
2062 temp = get_insns ();
2063 end_sequence ();
2065 add_label_notes (m->set_src, temp);
2067 i1 = emit_insns_before (temp, loop_start);
2068 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2069 REG_NOTES (i1)
2070 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2071 : REG_EQUAL),
2072 m->set_src, REG_NOTES (i1));
2074 else
2075 i1 = emit_insn_before (PATTERN (p), loop_start);
2077 if (REG_NOTES (i1) == 0)
2079 REG_NOTES (i1) = REG_NOTES (p);
2081 /* If there is a REG_EQUAL note present whose value
2082 is not loop invariant, then delete it, since it
2083 may cause problems with later optimization passes.
2084 It is possible for cse to create such notes
2085 like this as a result of record_jump_cond. */
2087 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2088 && ! invariant_p (XEXP (temp, 0)))
2089 remove_note (i1, temp);
2092 if (new_start == 0)
2093 new_start = i1;
2095 if (loop_dump_stream)
2096 fprintf (loop_dump_stream, " moved to %d",
2097 INSN_UID (i1));
2099 /* If library call, now fix the REG_NOTES that contain
2100 insn pointers, namely REG_LIBCALL on FIRST
2101 and REG_RETVAL on I1. */
2102 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2104 XEXP (temp, 0) = first;
2105 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2106 XEXP (temp, 0) = i1;
2109 temp = p;
2110 delete_insn (p);
2111 p = NEXT_INSN (p);
2113 /* simplify_giv_expr expects that it can walk the insns
2114 at m->insn forwards and see this old sequence we are
2115 tossing here. delete_insn does preserve the next
2116 pointers, but when we skip over a NOTE we must fix
2117 it up. Otherwise that code walks into the non-deleted
2118 insn stream. */
2119 while (p && GET_CODE (p) == NOTE)
2120 p = NEXT_INSN (temp) = NEXT_INSN (p);
2123 /* The more regs we move, the less we like moving them. */
2124 threshold -= 3;
2127 /* Any other movable that loads the same register
2128 MUST be moved. */
2129 already_moved[regno] = 1;
2131 /* This reg has been moved out of one loop. */
2132 moved_once[regno] = 1;
2134 /* The reg set here is now invariant. */
2135 if (! m->partial)
2136 VARRAY_INT (set_in_loop, regno) = 0;
2138 m->done = 1;
2140 /* Change the length-of-life info for the register
2141 to say it lives at least the full length of this loop.
2142 This will help guide optimizations in outer loops. */
2144 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2145 /* This is the old insn before all the moved insns.
2146 We can't use the moved insn because it is out of range
2147 in uid_luid. Only the old insns have luids. */
2148 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2149 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2150 REGNO_LAST_UID (regno) = INSN_UID (end);
2152 /* Combine with this moved insn any other matching movables. */
2154 if (! m->partial)
2155 for (m1 = movables; m1; m1 = m1->next)
2156 if (m1->match == m)
2158 rtx temp;
2160 /* Schedule the reg loaded by M1
2161 for replacement so that it shares the reg of M.
2162 If the modes differ (only possible in restricted
2163 circumstances), make a SUBREG.
2165 Note this assumes that the target dependent files
2166 treat REG and SUBREG equally, including within
2167 GO_IF_LEGITIMATE_ADDRESS and in all the
2168 predicates since we never verify that replacing the
2169 original register with a SUBREG results in a
2170 recognizable insn. */
2171 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2172 reg_map[m1->regno] = m->set_dest;
2173 else
2174 reg_map[m1->regno]
2175 = gen_lowpart_common (GET_MODE (m1->set_dest),
2176 m->set_dest);
2178 /* Get rid of the matching insn
2179 and prevent further processing of it. */
2180 m1->done = 1;
2182 /* If library call, delete all insns except the last, which
2183 is deleted below. */
2184 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2185 NULL_RTX)))
2187 for (temp = XEXP (temp, 0); temp != m1->insn;
2188 temp = NEXT_INSN (temp))
2189 delete_insn (temp);
2191 delete_insn (m1->insn);
2193 /* Any other movable that loads the same register
2194 MUST be moved. */
2195 already_moved[m1->regno] = 1;
2197 /* The reg merged here is now invariant,
2198 if the reg it matches is invariant. */
2199 if (! m->partial)
2200 VARRAY_INT (set_in_loop, m1->regno) = 0;
2203 else if (loop_dump_stream)
2204 fprintf (loop_dump_stream, "not desirable");
2206 else if (loop_dump_stream && !m->match)
2207 fprintf (loop_dump_stream, "not safe");
2209 if (loop_dump_stream)
2210 fprintf (loop_dump_stream, "\n");
2213 if (new_start == 0)
2214 new_start = loop_start;
2216 /* Go through all the instructions in the loop, making
2217 all the register substitutions scheduled in REG_MAP. */
2218 for (p = new_start; p != end; p = NEXT_INSN (p))
2219 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2220 || GET_CODE (p) == CALL_INSN)
2222 replace_regs (PATTERN (p), reg_map, nregs, 0);
2223 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2224 INSN_CODE (p) = -1;
2228 #if 0
2229 /* Scan X and replace the address of any MEM in it with ADDR.
2230 REG is the address that MEM should have before the replacement. */
2232 static void
2233 replace_call_address (x, reg, addr)
2234 rtx x, reg, addr;
2236 register enum rtx_code code;
2237 register int i;
2238 register const char *fmt;
2240 if (x == 0)
2241 return;
2242 code = GET_CODE (x);
2243 switch (code)
2245 case PC:
2246 case CC0:
2247 case CONST_INT:
2248 case CONST_DOUBLE:
2249 case CONST:
2250 case SYMBOL_REF:
2251 case LABEL_REF:
2252 case REG:
2253 return;
2255 case SET:
2256 /* Short cut for very common case. */
2257 replace_call_address (XEXP (x, 1), reg, addr);
2258 return;
2260 case CALL:
2261 /* Short cut for very common case. */
2262 replace_call_address (XEXP (x, 0), reg, addr);
2263 return;
2265 case MEM:
2266 /* If this MEM uses a reg other than the one we expected,
2267 something is wrong. */
2268 if (XEXP (x, 0) != reg)
2269 abort ();
2270 XEXP (x, 0) = addr;
2271 return;
2273 default:
2274 break;
2277 fmt = GET_RTX_FORMAT (code);
2278 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2280 if (fmt[i] == 'e')
2281 replace_call_address (XEXP (x, i), reg, addr);
2282 if (fmt[i] == 'E')
2284 register int j;
2285 for (j = 0; j < XVECLEN (x, i); j++)
2286 replace_call_address (XVECEXP (x, i, j), reg, addr);
2290 #endif
2292 /* Return the number of memory refs to addresses that vary
2293 in the rtx X. */
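/* For example (illustrative rtl), (mem (reg 60)) counts as one
   nonfixed read when (reg 60) is not loop-invariant, whereas
   (mem (symbol_ref "x")) counts as zero; the recursion also picks
   up varying reads nested inside another MEM's address.  */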
2295 static int
2296 count_nonfixed_reads (x)
2297 rtx x;
2299 register enum rtx_code code;
2300 register int i;
2301 register const char *fmt;
2302 int value;
2304 if (x == 0)
2305 return 0;
2307 code = GET_CODE (x);
2308 switch (code)
2310 case PC:
2311 case CC0:
2312 case CONST_INT:
2313 case CONST_DOUBLE:
2314 case CONST:
2315 case SYMBOL_REF:
2316 case LABEL_REF:
2317 case REG:
2318 return 0;
2320 case MEM:
2321 return ((invariant_p (XEXP (x, 0)) != 1)
2322 + count_nonfixed_reads (XEXP (x, 0)));
2324 default:
2325 break;
2328 value = 0;
2329 fmt = GET_RTX_FORMAT (code);
2330 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2332 if (fmt[i] == 'e')
2333 value += count_nonfixed_reads (XEXP (x, i));
2334 if (fmt[i] == 'E')
2336 register int j;
2337 for (j = 0; j < XVECLEN (x, i); j++)
2338 value += count_nonfixed_reads (XVECEXP (x, i, j));
2341 return value;
2345 #if 0
2346 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2347 Replace it with an instruction to load just the low bytes
2348 if the machine supports such an instruction,
2349 and insert above LOOP_START an instruction to clear the register. */
2351 static void
2352 constant_high_bytes (p, loop_start)
2353 rtx p, loop_start;
2355 register rtx new;
2356 register int insn_code_number;
2358 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2359 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2361 new
2362 = gen_rtx_SET
2363 (VOIDmode,
2364 gen_rtx_STRICT_LOW_PART
2365 (VOIDmode,
2366 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2367 SET_DEST (PATTERN (p)), 0)),
2368 XEXP (SET_SRC (PATTERN (p)), 0));
2370 insn_code_number = recog (new, p);
2372 if (insn_code_number)
2374 register int i;
2376 /* Clear destination register before the loop. */
2377 emit_insn_before (gen_rtx_SET (VOIDmode,
2378 SET_DEST (PATTERN (p)), const0_rtx),
2379 loop_start);
2381 /* Inside the loop, just load the low part. */
2382 PATTERN (p) = new;
2385 #endif
2387 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2388 `has_call', `has_volatile', and `has_tablejump' within LOOP_INFO.
2389 Set the global variables `unknown_address_altered' and
2390 `num_mem_sets'. Also, fill in the array `loop_mems' and the list
2391 `loop_store_mems'. */
2393 static void
2394 prescan_loop (start, end, loop_info)
2395 rtx start, end;
2396 struct loop_info *loop_info;
2398 register int level = 1;
2399 rtx insn;
2400 /* The label after END. Jumping here is just like falling off the
2401 end of the loop. We use next_nonnote_insn instead of next_label
2402 as a hedge against the (pathological) case where some actual insn
2403 might end up between the two. */
2404 rtx exit_target = next_nonnote_insn (end);
2406 loop_info->num = uid_loop_num [INSN_UID (start)];
2407 loop_info->has_indirect_jump = indirect_jump_in_function;
2408 loop_info->has_call = 0;
2409 loop_info->has_volatile = 0;
2410 loop_info->has_tablejump = 0;
2411 loop_info->loops_enclosed = 1;
2412 loop_info->has_multiple_exit_targets = 0;
2413 loop_info->cont = 0;
2414 loop_info->vtop = 0;
2416 unknown_address_altered = 0;
2417 loop_store_mems = NULL_RTX;
2418 first_loop_store_insn = NULL_RTX;
2419 loop_mems_idx = 0;
2420 num_mem_sets = 0;
2422 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2423 insn = NEXT_INSN (insn))
2425 if (GET_CODE (insn) == NOTE)
2427 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2429 ++level;
2430 /* Count number of loops contained in this one. */
2431 loop_info->loops_enclosed++;
2433 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2435 --level;
2436 if (level == 0)
2438 end = insn;
2439 break;
2442 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2444 if (level == 1)
2445 loop_info->cont = insn;
2447 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
2449 /* If there is a NOTE_INSN_LOOP_VTOP, then this is a for
2450 or while style loop, with a loop exit test at the
2451 start. Thus, we can assume that the loop condition
2452 was true when the loop was entered. */
2453 if (level == 1)
2454 loop_info->vtop = insn;
2457 else if (GET_CODE (insn) == CALL_INSN)
2459 if (! CONST_CALL_P (insn))
2460 unknown_address_altered = 1;
2461 loop_info->has_call = 1;
2463 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2465 rtx label1 = NULL_RTX;
2466 rtx label2 = NULL_RTX;
2468 if (volatile_refs_p (PATTERN (insn)))
2469 loop_info->has_volatile = 1;
2471 if (GET_CODE (insn) == JUMP_INSN
2472 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2473 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2474 loop_info->has_tablejump = 1;
2476 note_stores (PATTERN (insn), note_addr_stored, NULL);
2477 if (! first_loop_store_insn && loop_store_mems)
2478 first_loop_store_insn = insn;
2480 if (! loop_info->has_multiple_exit_targets
2481 && GET_CODE (insn) == JUMP_INSN
2482 && GET_CODE (PATTERN (insn)) == SET
2483 && SET_DEST (PATTERN (insn)) == pc_rtx)
2485 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2487 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2488 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2490 else
2492 label1 = SET_SRC (PATTERN (insn));
2495 do {
2496 if (label1 && label1 != pc_rtx)
2498 if (GET_CODE (label1) != LABEL_REF)
2500 /* Something tricky. */
2501 loop_info->has_multiple_exit_targets = 1;
2502 break;
2504 else if (XEXP (label1, 0) != exit_target
2505 && LABEL_OUTSIDE_LOOP_P (label1))
2507 /* A jump outside the current loop. */
2508 loop_info->has_multiple_exit_targets = 1;
2509 break;
2513 label1 = label2;
2514 label2 = NULL_RTX;
2515 } while (label1);
2518 else if (GET_CODE (insn) == RETURN)
2519 loop_info->has_multiple_exit_targets = 1;
2522 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2523 if (/* We can't tell what MEMs are aliased by what. */
2524 !unknown_address_altered
2525 /* An exception thrown by a called function might land us
2526 anywhere. */
2527 && !loop_info->has_call
2528 /* We don't want loads for MEMs moved to a location before the
2529 one at which their stack memory becomes allocated. (Note
2530 that this is not a problem for malloc, etc., since those
2531 require actual function calls.) */
2532 && !current_function_calls_alloca
2533 /* There are ways to leave the loop other than falling off the
2534 end. */
2535 && !loop_info->has_multiple_exit_targets)
2536 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2537 insn = NEXT_INSN (insn))
2538 for_each_rtx (&insn, insert_loop_mem, 0);
2541 /* LOOP_NUMBER_CONT_DOMINATOR is now the last label between the loop start
2542 and the continue note that is the destination of a (cond)jump after
2543 the continue note. If there is any (cond)jump between the loop start
2544 and what we have so far as LOOP_NUMBER_CONT_DOMINATOR that has a
2545 target between LOOP_NUMBER_CONT_DOMINATOR and the continue note, move
2546 LOOP_NUMBER_CONT_DOMINATOR forward to that label; if a jump's
2547 destination cannot be determined, clear LOOP_NUMBER_CONT_DOMINATOR. */
2549 static void
2550 verify_dominator (loop_number)
2551 int loop_number;
2553 rtx insn;
2555 if (! loop_number_cont_dominator[loop_number])
2556 /* This can happen for an empty loop, e.g. in
2557 gcc.c-torture/compile/920410-2.c */
2558 return;
2559 if (loop_number_cont_dominator[loop_number] == const0_rtx)
2561 loop_number_cont_dominator[loop_number] = 0;
2562 return;
2564 for (insn = loop_number_loop_starts[loop_number];
2565 insn != loop_number_cont_dominator[loop_number];
2566 insn = NEXT_INSN (insn))
2568 if (GET_CODE (insn) == JUMP_INSN
2569 && GET_CODE (PATTERN (insn)) != RETURN)
2571 rtx label = JUMP_LABEL (insn);
2572 int label_luid;
2574 /* If it is not a jump we can easily understand or for
2575 which we do not have jump target information in the JUMP_LABEL
2576 field (consider ADDR_VEC and ADDR_DIFF_VEC insns), then clear
2577 LOOP_NUMBER_CONT_DOMINATOR. */
2578 if ((! condjump_p (insn)
2579 && ! condjump_in_parallel_p (insn))
2580 || label == NULL_RTX)
2582 loop_number_cont_dominator[loop_number] = NULL_RTX;
2583 return;
2586 label_luid = INSN_LUID (label);
2587 if (label_luid < INSN_LUID (loop_number_loop_cont[loop_number])
2588 && (label_luid
2589 > INSN_LUID (loop_number_cont_dominator[loop_number])))
2590 loop_number_cont_dominator[loop_number] = label;
2595 /* Scan the function looking for loops. Record the start and end of each loop.
2596 Also mark as invalid loops any loops that contain a setjmp or are branched
2597 to from outside the loop. */
2599 static void
2600 find_and_verify_loops (f)
2601 rtx f;
2603 rtx insn, label;
2604 int current_loop = -1;
2605 int next_loop = -1;
2606 int loop;
2608 compute_luids (f, NULL_RTX, 0);
2610 /* If there are jumps to undefined labels,
2611 treat them as jumps out of any/all loops.
2612 This also avoids writing past end of tables when there are no loops. */
2613 uid_loop_num[0] = -1;
2615 /* Find boundaries of loops, mark which loops are contained within
2616 loops, and invalidate loops that have setjmp. */
2618 for (insn = f; insn; insn = NEXT_INSN (insn))
2620 if (GET_CODE (insn) == NOTE)
2621 switch (NOTE_LINE_NUMBER (insn))
2623 case NOTE_INSN_LOOP_BEG:
2624 loop_number_loop_starts[++next_loop] = insn;
2625 loop_number_loop_ends[next_loop] = 0;
2626 loop_number_loop_cont[next_loop] = 0;
2627 loop_number_cont_dominator[next_loop] = 0;
2628 loop_outer_loop[next_loop] = current_loop;
2629 loop_invalid[next_loop] = 0;
2630 loop_number_exit_labels[next_loop] = 0;
2631 loop_number_exit_count[next_loop] = 0;
2632 current_loop = next_loop;
2633 break;
2635 case NOTE_INSN_SETJMP:
2636 /* In this case, we must invalidate our current loop and any
2637 enclosing loop. */
2638 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2640 loop_invalid[loop] = 1;
2641 if (loop_dump_stream)
2642 fprintf (loop_dump_stream,
2643 "\nLoop at %d ignored due to setjmp.\n",
2644 INSN_UID (loop_number_loop_starts[loop]));
2646 break;
2648 case NOTE_INSN_LOOP_CONT:
2649 loop_number_loop_cont[current_loop] = insn;
2650 break;
2651 case NOTE_INSN_LOOP_END:
2652 if (current_loop == -1)
2653 abort ();
2655 loop_number_loop_ends[current_loop] = insn;
2656 verify_dominator (current_loop);
2657 current_loop = loop_outer_loop[current_loop];
2658 break;
2660 default:
2661 break;
2663 /* If, for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT
2664 and NOTE_INSN_LOOP_END notes, update loop_number_cont_dominator. */
2665 else if (GET_CODE (insn) == JUMP_INSN
2666 && GET_CODE (PATTERN (insn)) != RETURN
2667 && current_loop >= 0)
2669 int this_loop_num;
2670 rtx label = JUMP_LABEL (insn);
2672 if (! condjump_p (insn) && ! condjump_in_parallel_p (insn))
2673 label = NULL_RTX;
2675 this_loop_num = current_loop;
2678 /* First see if we care about this loop. */
2679 if (loop_number_loop_cont[this_loop_num]
2680 && loop_number_cont_dominator[this_loop_num] != const0_rtx)
2682 /* If the jump destination is not known, invalidate
2683 loop_number_cont_dominator. */
2684 if (! label)
2685 loop_number_cont_dominator[this_loop_num] = const0_rtx;
2686 else
2687 /* Check if the destination is between loop start and
2688 cont. */
2689 if ((INSN_LUID (label)
2690 < INSN_LUID (loop_number_loop_cont[this_loop_num]))
2691 && (INSN_LUID (label)
2692 > INSN_LUID (loop_number_loop_starts[this_loop_num]))
2693 /* And if there is no later destination already
2694 recorded. */
2695 && (! loop_number_cont_dominator[this_loop_num]
2696 || (INSN_LUID (label)
2697 > INSN_LUID (loop_number_cont_dominator
2698 [this_loop_num]))))
2699 loop_number_cont_dominator[this_loop_num] = label;
2701 this_loop_num = loop_outer_loop[this_loop_num];
2703 while (this_loop_num >= 0);
2706 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2707 enclosing loop, but this doesn't matter. */
2708 uid_loop_num[INSN_UID (insn)] = current_loop;
2711 /* Any loop containing a label used in an initializer must be invalidated,
2712 because it can be jumped into from anywhere. */
2714 for (label = forced_labels; label; label = XEXP (label, 1))
2716 int loop_num;
2718 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2719 loop_num != -1;
2720 loop_num = loop_outer_loop[loop_num])
2721 loop_invalid[loop_num] = 1;
2724 /* Any loop containing a label used for an exception handler must be
2725 invalidated, because it can be jumped into from anywhere. */
2727 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2729 int loop_num;
2731 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2732 loop_num != -1;
2733 loop_num = loop_outer_loop[loop_num])
2734 loop_invalid[loop_num] = 1;
2737 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2738 loop that it is not contained within, that loop is marked invalid.
2739 If any INSN or CALL_INSN uses a label's address, then the loop containing
2740 that label is marked invalid, because it could be jumped into from
2741 anywhere.
2743 Also look for blocks of code ending in an unconditional branch that
2744 exits the loop. If such a block is surrounded by a conditional
2745 branch around the block, move the block elsewhere (see below) and
2746 invert the jump to point to the code block. This may eliminate a
2747 label in our loop and will simplify processing by both us and a
2748 possible second cse pass. */
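/* Schematically (labels and branch mnemonics are illustrative), a
   loop body of the form

	beq  Lpast	; conditional branch around the block
	...block...
	jmp  Lout	; unconditional branch exiting the loop
   Lpast:

   becomes

	bne  Lblock	; inverted jump, now targeting the moved block
   Lpast:
	...

   with the block placed after a BARRIER outside the loop:

   Lblock:
	...block...
	jmp  Lout

   so the rarely executed exit path no longer takes up space inside
   the loop, and Lpast can often be deleted as well.  */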
2750 for (insn = f; insn; insn = NEXT_INSN (insn))
2751 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2753 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2755 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2757 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2758 if (note)
2760 int loop_num;
2762 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2763 loop_num != -1;
2764 loop_num = loop_outer_loop[loop_num])
2765 loop_invalid[loop_num] = 1;
2769 if (GET_CODE (insn) != JUMP_INSN)
2770 continue;
2772 mark_loop_jump (PATTERN (insn), this_loop_num);
2774 /* See if this is an unconditional branch outside the loop. */
2775 if (this_loop_num != -1
2776 && (GET_CODE (PATTERN (insn)) == RETURN
2777 || (simplejump_p (insn)
2778 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2779 != this_loop_num)))
2780 && get_max_uid () < max_uid_for_loop)
2782 rtx p;
2783 rtx our_next = next_real_insn (insn);
2784 rtx last_insn_to_move = NEXT_INSN (insn);
2785 int dest_loop;
2786 int outer_loop = -1;
2788 /* Go backwards until we reach the start of the loop, a label,
2789 or a JUMP_INSN. */
2790 for (p = PREV_INSN (insn);
2791 GET_CODE (p) != CODE_LABEL
2792 && ! (GET_CODE (p) == NOTE
2793 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2794 && GET_CODE (p) != JUMP_INSN;
2795 p = PREV_INSN (p))
2798 /* Check for the case where we have a jump to an inner nested
2799 loop, and do not perform the optimization in that case. */
2801 if (JUMP_LABEL (insn))
2803 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2804 if (dest_loop != -1)
2806 for (outer_loop = dest_loop; outer_loop != -1;
2807 outer_loop = loop_outer_loop[outer_loop])
2808 if (outer_loop == this_loop_num)
2809 break;
2813 /* Make sure that the target of P is within the current loop. */
2815 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2816 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2817 outer_loop = this_loop_num;
2819 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2820 we have a block of code to try to move.
2822 We look backward and then forward from the target of INSN
2823 to find a BARRIER at the same loop depth as the target.
2824 If we find such a BARRIER, we make a new label for the start
2825 of the block, invert the jump in P and point it to that label,
2826 and move the block of code to the spot we found. */
2828 if (outer_loop == -1
2829 && GET_CODE (p) == JUMP_INSN
2830 && JUMP_LABEL (p) != 0
2831 /* Just ignore jumps to labels that were never emitted.
2832 These always indicate compilation errors. */
2833 && INSN_UID (JUMP_LABEL (p)) != 0
2834 && condjump_p (p)
2835 && ! simplejump_p (p)
2836 && next_real_insn (JUMP_LABEL (p)) == our_next
2837 /* If it's not safe to move the sequence, then we
2838 mustn't try. */
2839 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2840 &last_insn_to_move))
2842 rtx target
2843 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2844 int target_loop_num = uid_loop_num[INSN_UID (target)];
2845 rtx loc, loc2;
2847 for (loc = target; loc; loc = PREV_INSN (loc))
2848 if (GET_CODE (loc) == BARRIER
2849 /* Don't move things inside a tablejump. */
2850 && ((loc2 = next_nonnote_insn (loc)) == 0
2851 || GET_CODE (loc2) != CODE_LABEL
2852 || (loc2 = next_nonnote_insn (loc2)) == 0
2853 || GET_CODE (loc2) != JUMP_INSN
2854 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2855 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2856 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2857 break;
2859 if (loc == 0)
2860 for (loc = target; loc; loc = NEXT_INSN (loc))
2861 if (GET_CODE (loc) == BARRIER
2862 /* Don't move things inside a tablejump. */
2863 && ((loc2 = next_nonnote_insn (loc)) == 0
2864 || GET_CODE (loc2) != CODE_LABEL
2865 || (loc2 = next_nonnote_insn (loc2)) == 0
2866 || GET_CODE (loc2) != JUMP_INSN
2867 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2868 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2869 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2870 break;
2872 if (loc)
2874 rtx cond_label = JUMP_LABEL (p);
2875 rtx new_label = get_label_after (p);
2877 /* Ensure our label doesn't go away. */
2878 LABEL_NUSES (cond_label)++;
2880 /* Verify that uid_loop_num is large enough and that
2881 we can invert P. */
2882 if (invert_jump (p, new_label))
2884 rtx q, r;
2886 /* If no suitable BARRIER was found, create a suitable
2887 one before TARGET. Since TARGET is a fall-through
2888 path, we'll need to insert a jump around our block
2889 and add a BARRIER before TARGET.
2891 This creates an extra unconditional jump outside
2892 the loop. However, the benefits of removing rarely
2893 executed instructions from inside the loop usually
2894 outweigh the cost of the extra unconditional jump
2895 outside the loop. */
2896 if (loc == 0)
2898 rtx temp;
2900 temp = gen_jump (JUMP_LABEL (insn));
2901 temp = emit_jump_insn_before (temp, target);
2902 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2903 LABEL_NUSES (JUMP_LABEL (insn))++;
2904 loc = emit_barrier_before (target);
2907 /* Include the BARRIER after INSN and copy the
2908 block after LOC. */
2909 new_label = squeeze_notes (new_label,
2910 last_insn_to_move);
2911 reorder_insns (new_label, last_insn_to_move, loc);
2913 /* All those insns are now in TARGET_LOOP_NUM. */
2914 for (q = new_label;
2915 q != NEXT_INSN (last_insn_to_move);
2916 q = NEXT_INSN (q))
2917 uid_loop_num[INSN_UID (q)] = target_loop_num;
2919 /* The label jumped to by INSN is no longer a loop exit.
2920 Unless INSN does not have a label (e.g., it is a
2921 RETURN insn), search loop_number_exit_labels to find
2922 its label_ref, and remove it. Also turn off
2923 LABEL_OUTSIDE_LOOP_P bit. */
2924 if (JUMP_LABEL (insn))
2926 int loop_num;
2928 for (q = 0,
2929 r = loop_number_exit_labels[this_loop_num];
2930 r; q = r, r = LABEL_NEXTREF (r))
2931 if (XEXP (r, 0) == JUMP_LABEL (insn))
2933 LABEL_OUTSIDE_LOOP_P (r) = 0;
2934 if (q)
2935 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2936 else
2937 loop_number_exit_labels[this_loop_num]
2938 = LABEL_NEXTREF (r);
2939 break;
2942 for (loop_num = this_loop_num;
2943 loop_num != -1 && loop_num != target_loop_num;
2944 loop_num = loop_outer_loop[loop_num])
2945 loop_number_exit_count[loop_num]--;
2947 /* If we didn't find it, then something is wrong. */
2948 if (! r)
2949 abort ();
2952 /* P is now a jump outside the loop, so it must be put
2953 in loop_number_exit_labels, and marked as such.
2954 The easiest way to do this is to just call
2955 mark_loop_jump again for P. */
2956 mark_loop_jump (PATTERN (p), this_loop_num);
2958 /* If INSN now jumps to the insn after it,
2959 delete INSN. */
2960 if (JUMP_LABEL (insn) != 0
2961 && (next_real_insn (JUMP_LABEL (insn))
2962 == next_real_insn (insn)))
2963 delete_insn (insn);
2966 /* Continue the loop after where the conditional
2967 branch used to jump, since the only branch insn
2968 in the block (if it still remains) is an inter-loop
2969 branch and hence needs no processing. */
2970 insn = NEXT_INSN (cond_label);
2972 if (--LABEL_NUSES (cond_label) == 0)
2973 delete_insn (cond_label);
2975 /* This loop will be continued with NEXT_INSN (insn). */
2976 insn = PREV_INSN (insn);
2983 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2984 loops it is contained in, mark the target loop invalid.
2986 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2988 static void
2989 mark_loop_jump (x, loop_num)
2990 rtx x;
2991 int loop_num;
2993 int dest_loop;
2994 int outer_loop;
2995 int i;
2997 switch (GET_CODE (x))
2999 case PC:
3000 case USE:
3001 case CLOBBER:
3002 case REG:
3003 case MEM:
3004 case CONST_INT:
3005 case CONST_DOUBLE:
3006 case RETURN:
3007 return;
3009 case CONST:
3010 /* There could be a label reference in here. */
3011 mark_loop_jump (XEXP (x, 0), loop_num);
3012 return;
3014 case PLUS:
3015 case MINUS:
3016 case MULT:
3017 mark_loop_jump (XEXP (x, 0), loop_num);
3018 mark_loop_jump (XEXP (x, 1), loop_num);
3019 return;
3021 case LO_SUM:
3022 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3023 mark_loop_jump (XEXP (x, 1), loop_num);
3024 return;
3026 case SIGN_EXTEND:
3027 case ZERO_EXTEND:
3028 mark_loop_jump (XEXP (x, 0), loop_num);
3029 return;
3031 case LABEL_REF:
3032 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
3034 /* Link together all labels that branch outside the loop. This
3035 is used by final_[bg]iv_value and the loop unrolling code. Also
3036 mark this LABEL_REF so we know that this branch should predict
3037 false. */
3039 /* A check to make sure the label is not in an inner nested loop,
3040 since this does not count as a loop exit. */
3041 if (dest_loop != -1)
3043 for (outer_loop = dest_loop; outer_loop != -1;
3044 outer_loop = loop_outer_loop[outer_loop])
3045 if (outer_loop == loop_num)
3046 break;
3048 else
3049 outer_loop = -1;
3051 if (loop_num != -1 && outer_loop == -1)
3053 LABEL_OUTSIDE_LOOP_P (x) = 1;
3054 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
3055 loop_number_exit_labels[loop_num] = x;
3057 for (outer_loop = loop_num;
3058 outer_loop != -1 && outer_loop != dest_loop;
3059 outer_loop = loop_outer_loop[outer_loop])
3060 loop_number_exit_count[outer_loop]++;
3063 /* If this is inside a loop, but not in the current loop or one enclosed
3064 by it, it invalidates at least one loop. */
3066 if (dest_loop == -1)
3067 return;
3069 /* We must invalidate every nested loop containing the target of this
3070 label, except those that also contain the jump insn. */
3072 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
3074 /* Stop when we reach a loop that also contains the jump insn. */
3075 for (outer_loop = loop_num; outer_loop != -1;
3076 outer_loop = loop_outer_loop[outer_loop])
3077 if (dest_loop == outer_loop)
3078 return;
3080 /* If we get here, we know we need to invalidate a loop. */
3081 if (loop_dump_stream && ! loop_invalid[dest_loop])
3082 fprintf (loop_dump_stream,
3083 "\nLoop at %d ignored due to multiple entry points.\n",
3084 INSN_UID (loop_number_loop_starts[dest_loop]));
3086 loop_invalid[dest_loop] = 1;
3088 return;
3090 case SET:
3091 /* If this is not setting pc, ignore. */
3092 if (SET_DEST (x) == pc_rtx)
3093 mark_loop_jump (SET_SRC (x), loop_num);
3094 return;
3096 case IF_THEN_ELSE:
3097 mark_loop_jump (XEXP (x, 1), loop_num);
3098 mark_loop_jump (XEXP (x, 2), loop_num);
3099 return;
3101 case PARALLEL:
3102 case ADDR_VEC:
3103 for (i = 0; i < XVECLEN (x, 0); i++)
3104 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
3105 return;
3107 case ADDR_DIFF_VEC:
3108 for (i = 0; i < XVECLEN (x, 1); i++)
3109 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
3110 return;
3112 default:
3113 /* Strictly speaking this is not a jump into the loop, only a possible
3114 jump out of the loop. However, we have no way to link the destination
3115 of this jump onto the list of exit labels. To be safe we mark this
3116 loop and any containing loops as invalid. */
3117 if (loop_num != -1)
3119 for (outer_loop = loop_num; outer_loop != -1;
3120 outer_loop = loop_outer_loop[outer_loop])
3122 if (loop_dump_stream && ! loop_invalid[outer_loop])
3123 fprintf (loop_dump_stream,
3124 "\nLoop at %d ignored due to unknown exit jump.\n",
3125 INSN_UID (loop_number_loop_starts[outer_loop]));
3126 loop_invalid[outer_loop] = 1;
3129 return;
3133 /* Return nonzero if there is a label in the range from
3134 insn INSN to and including the insn whose luid is END.
3135 INSN must have an assigned luid (i.e., it must not have
3136 been previously created by loop.c). */
3138 static int
3139 labels_in_range_p (insn, end)
3140 rtx insn;
3141 int end;
3143 while (insn && INSN_LUID (insn) <= end)
3145 if (GET_CODE (insn) == CODE_LABEL)
3146 return 1;
3147 insn = NEXT_INSN (insn);
3150 return 0;
3153 /* Record that a memory reference X is being set. */
3155 static void
3156 note_addr_stored (x, y, data)
3157 rtx x;
3158 rtx y ATTRIBUTE_UNUSED;
3159 void *data ATTRIBUTE_UNUSED;
3161 if (x == 0 || GET_CODE (x) != MEM)
3162 return;
3164 /* Count number of memory writes.
3165 This affects heuristics in strength_reduce. */
3166 num_mem_sets++;
3168 /* BLKmode MEM means all memory is clobbered. */
3169 if (GET_MODE (x) == BLKmode)
3170 unknown_address_altered = 1;
3172 if (unknown_address_altered)
3173 return;
3175 loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems);
3178 /* X is a value modified by an INSN that references a biv inside a loop
3179 exit test (ie, X is somehow related to the value of the biv). If X
3180 is a pseudo that is used more than once, then the biv is (effectively)
3181 used more than once. DATA is really an `int *', and is set if the
3182 biv is used more than once. */
3184 static void
3185 note_set_pseudo_multiple_uses (x, y, data)
3186 rtx x;
3187 rtx y ATTRIBUTE_UNUSED;
3188 void *data;
3190 if (x == 0)
3191 return;
3193 while (GET_CODE (x) == STRICT_LOW_PART
3194 || GET_CODE (x) == SIGN_EXTRACT
3195 || GET_CODE (x) == ZERO_EXTRACT
3196 || GET_CODE (x) == SUBREG)
3197 x = XEXP (x, 0);
3199 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3200 return;
3202 /* If we do not have usage information, or if we know the register
3203 is used more than once, note that fact for check_dbra_loop. */
3204 if (REGNO (x) >= max_reg_before_loop
3205 || ! VARRAY_RTX (reg_single_usage, REGNO (x))
3206 || VARRAY_RTX (reg_single_usage, REGNO (x)) == const0_rtx)
3207 *((int *) data) = 1;
3210 /* Return nonzero if the rtx X is invariant over the current loop.
3212 The value is 2 if we refer to something only conditionally invariant.
3214 If `unknown_address_altered' is nonzero, no memory ref is invariant.
3215 Otherwise, a memory ref is invariant if it does not conflict with
3216 anything stored in `loop_store_mems'. */
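/* Some examples of the return values: (const_int 4) or
   (symbol_ref "x") yields 1; a pseudo reg whose set_in_loop entry
   is negative (used to mark regs set only by candidate movable
   insns) yields 2, i.e. conditionally invariant; any MEM yields 0
   once unknown_address_altered is set, as does a MEM that may
   conflict with an entry of loop_store_mems.  */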
3219 invariant_p (x)
3220 register rtx x;
3222 register int i;
3223 register enum rtx_code code;
3224 register const char *fmt;
3225 int conditional = 0;
3226 rtx mem_list_entry;
3228 if (x == 0)
3229 return 1;
3230 code = GET_CODE (x);
3231 switch (code)
3233 case CONST_INT:
3234 case CONST_DOUBLE:
3235 case SYMBOL_REF:
3236 case CONST:
3237 return 1;
3239 case LABEL_REF:
3240 /* A LABEL_REF is normally invariant, however, if we are unrolling
3241 loops, and this label is inside the loop, then it isn't invariant.
3242 This is because each unrolled copy of the loop body will have
3243 a copy of this label. If this was invariant, then an insn loading
3244 the address of this label into a register might get moved outside
3245 the loop, and then each loop body would end up using the same label.
3247 We don't know the loop bounds here though, so just fail for all
3248 labels. */
3249 if (flag_unroll_loops)
3250 return 0;
3251 else
3252 return 1;
3254 case PC:
3255 case CC0:
3256 case UNSPEC_VOLATILE:
3257 return 0;
3259 case REG:
3260 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3261 since the reg might be set by initialization within the loop. */
3263 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3264 || x == arg_pointer_rtx)
3265 && ! current_function_has_nonlocal_goto)
3266 return 1;
3268 if (this_loop_info.has_call
3269 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3270 return 0;
3272 if (VARRAY_INT (set_in_loop, REGNO (x)) < 0)
3273 return 2;
3275 return VARRAY_INT (set_in_loop, REGNO (x)) == 0;
3277 case MEM:
3278 /* Volatile memory references must be rejected. Do this before
3279 checking for read-only items, so that volatile read-only items
3280 will be rejected also. */
3281 if (MEM_VOLATILE_P (x))
3282 return 0;
3284 /* Read-only items (such as constants in a constant pool) are
3285 invariant if their address is. */
3286 if (RTX_UNCHANGING_P (x))
3287 break;
3289 /* If we had a subroutine call, any location in memory could have been
3290 clobbered. */
3291 if (unknown_address_altered)
3292 return 0;
3294 /* See if there is any dependence between a store and this load. */
3295 mem_list_entry = loop_store_mems;
3296 while (mem_list_entry)
3298 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3299 x, rtx_varies_p))
3300 return 0;
3301 mem_list_entry = XEXP (mem_list_entry, 1);
3304 /* It's not invalidated by a store in memory
3305 but we must still verify the address is invariant. */
3306 break;
3308 case ASM_OPERANDS:
3309 /* Don't mess with insns declared volatile. */
3310 if (MEM_VOLATILE_P (x))
3311 return 0;
3312 break;
3314 default:
3315 break;
3318 fmt = GET_RTX_FORMAT (code);
3319 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3321 if (fmt[i] == 'e')
3323 int tem = invariant_p (XEXP (x, i));
3324 if (tem == 0)
3325 return 0;
3326 if (tem == 2)
3327 conditional = 1;
3329 else if (fmt[i] == 'E')
3331 register int j;
3332 for (j = 0; j < XVECLEN (x, i); j++)
3334 int tem = invariant_p (XVECEXP (x, i, j));
3335 if (tem == 0)
3336 return 0;
3337 if (tem == 2)
3338 conditional = 1;
3344 return 1 + conditional;
3348 /* Return nonzero if all the insns in the loop that set REG
3349 are INSN and the immediately following insns,
3350 and if each of those insns sets REG in an invariant way
3351 (not counting uses of REG in them).
3353 The value is 2 if some of these insns are only conditionally invariant.
3355 We assume that INSN itself is the first set of REG
3356 and that its source is invariant. */
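/* A typical case (illustrative rtl) is a constant load split over
   two insns, as on many RISC targets:

	(set (reg 60) (high (symbol_ref "x")))
	(set (reg 60) (lo_sum (reg 60) (symbol_ref "x")))

   Both sources are invariant once uses of (reg 60) itself are
   disregarded, so the pair may be hoisted as a unit.  */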
3358 static int
3359 consec_sets_invariant_p (reg, n_sets, insn)
3360 int n_sets;
3361 rtx reg, insn;
3363 register rtx p = insn;
3364 register int regno = REGNO (reg);
3365 rtx temp;
3366 /* Number of sets we have to insist on finding after INSN. */
3367 int count = n_sets - 1;
3368 int old = VARRAY_INT (set_in_loop, regno);
3369 int value = 0;
3370 int this;
3372 /* If N_SETS hit the limit, we can't rely on its value. */
3373 if (n_sets == 127)
3374 return 0;
3376 VARRAY_INT (set_in_loop, regno) = 0;
3378 while (count > 0)
3380 register enum rtx_code code;
3381 rtx set;
3383 p = NEXT_INSN (p);
3384 code = GET_CODE (p);
3386 /* If library call, skip to end of it. */
3387 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3388 p = XEXP (temp, 0);
3390 this = 0;
3391 if (code == INSN
3392 && (set = single_set (p))
3393 && GET_CODE (SET_DEST (set)) == REG
3394 && REGNO (SET_DEST (set)) == regno)
3396 this = invariant_p (SET_SRC (set));
3397 if (this != 0)
3398 value |= this;
3399 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3401 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3402 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3403 notes are OK. */
3404 this = (CONSTANT_P (XEXP (temp, 0))
3405 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3406 && invariant_p (XEXP (temp, 0))));
3407 if (this != 0)
3408 value |= this;
3411 if (this != 0)
3412 count--;
3413 else if (code != NOTE)
3415 VARRAY_INT (set_in_loop, regno) = old;
3416 return 0;
3420 VARRAY_INT (set_in_loop, regno) = old;
3421 /* If invariant_p ever returned 2, we return 2. */
3422 return 1 + (value & 2);
3425 #if 0
3426 /* I don't think this condition is sufficient to allow INSN
3427 to be moved, so we no longer test it. */
3429 /* Return 1 if all insns in the basic block of INSN and following INSN
3430 that set REG are invariant according to TABLE. */
3432 static int
3433 all_sets_invariant_p (reg, insn, table)
3434 rtx reg, insn;
3435 short *table;
3437 register rtx p = insn;
3438 register int regno = REGNO (reg);
3440 while (1)
3442 register enum rtx_code code;
3443 p = NEXT_INSN (p);
3444 code = GET_CODE (p);
3445 if (code == CODE_LABEL || code == JUMP_INSN)
3446 return 1;
3447 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3448 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3449 && REGNO (SET_DEST (PATTERN (p))) == regno)
3451 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3452 return 0;
3456 #endif /* 0 */
3458 /* Look at all uses (not sets) of registers in X. For each, if it is
3459 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3460 a different insn, set USAGE[REGNO] to const0_rtx. */
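/* For instance, the first use of (reg 60) in some insn I1 sets
   USAGE[60] to I1; further uses inside I1 leave it unchanged, but a
   use in any other insn demotes USAGE[60] to const0_rtx, meaning
   "used by more than one insn".  (I1 and reg 60 are made up.)  */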
3462 static void
3463 find_single_use_in_loop (insn, x, usage)
3464 rtx insn;
3465 rtx x;
3466 varray_type usage;
3468 enum rtx_code code = GET_CODE (x);
3469 const char *fmt = GET_RTX_FORMAT (code);
3470 int i, j;
3472 if (code == REG)
3473 VARRAY_RTX (usage, REGNO (x))
3474 = (VARRAY_RTX (usage, REGNO (x)) != 0
3475 && VARRAY_RTX (usage, REGNO (x)) != insn)
3476 ? const0_rtx : insn;
3478 else if (code == SET)
3480 /* Don't count SET_DEST if it is a REG; otherwise count things
3481 in SET_DEST because if a register is partially modified, it won't
3482 show up as a potential movable so we don't care how USAGE is set
3483 for it. */
3484 if (GET_CODE (SET_DEST (x)) != REG)
3485 find_single_use_in_loop (insn, SET_DEST (x), usage);
3486 find_single_use_in_loop (insn, SET_SRC (x), usage);
3488 else
3489 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3491 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3492 find_single_use_in_loop (insn, XEXP (x, i), usage);
3493 else if (fmt[i] == 'E')
3494 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3495 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3499 /* Count and record any set in X which is contained in INSN. Update
3500 MAY_NOT_MOVE and LAST_SET for any register set in X. */
3502 static void
3503 count_one_set (insn, x, may_not_move, last_set)
3504 rtx insn, x;
3505 varray_type may_not_move;
3506 rtx *last_set;
3508 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3509 /* Don't move a reg that has an explicit clobber.
3510 It's not worth the pain to try to do it correctly. */
3511 VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
3513 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3515 rtx dest = SET_DEST (x);
3516 while (GET_CODE (dest) == SUBREG
3517 || GET_CODE (dest) == ZERO_EXTRACT
3518 || GET_CODE (dest) == SIGN_EXTRACT
3519 || GET_CODE (dest) == STRICT_LOW_PART)
3520 dest = XEXP (dest, 0);
3521 if (GET_CODE (dest) == REG)
3523 register int regno = REGNO (dest);
3524 /* If this is the first setting of this reg
3525 in current basic block, and it was set before,
3526 it must be set in two basic blocks, so it cannot
3527 be moved out of the loop. */
3528 if (VARRAY_INT (set_in_loop, regno) > 0
3529 && last_set[regno] == 0)
3530 VARRAY_CHAR (may_not_move, regno) = 1;
3531 /* If this is not first setting in current basic block,
3532 see if reg was used in between previous one and this.
3533 If so, neither one can be moved. */
3534 if (last_set[regno] != 0
3535 && reg_used_between_p (dest, last_set[regno], insn))
3536 VARRAY_CHAR (may_not_move, regno) = 1;
3537 if (VARRAY_INT (set_in_loop, regno) < 127)
3538 ++VARRAY_INT (set_in_loop, regno);
3539 last_set[regno] = insn;
3544 /* Increment SET_IN_LOOP at the index of each register
3545 that is modified by an insn between FROM and TO.
3546 If the value of an element of SET_IN_LOOP becomes 127 or more,
3547 stop incrementing it, to avoid overflow.
3549 Store in SINGLE_USAGE[I] the single insn in which register I is
3550 used, if it is only used once. Otherwise, it is set to 0 (for no
3551 uses) or const0_rtx for more than one use. This parameter may be zero,
3552 in which case this processing is not done.
3554 Store in *COUNT_PTR the number of actual instructions
3555 in the loop. We use this to decide what is worth moving out. */
3557 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3558 In that case, it is the insn that last set reg n. */
3560 static void
3561 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3562 register rtx from, to;
3563 varray_type may_not_move;
3564 varray_type single_usage;
3565 int *count_ptr;
3566 int nregs;
3568 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3569 register rtx insn;
3570 register int count = 0;
3572 bzero ((char *) last_set, nregs * sizeof (rtx));
3573 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3575 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3577 ++count;
3579 /* Record registers that have exactly one use. */
3580 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3582 /* Include uses in REG_EQUAL notes. */
3583 if (REG_NOTES (insn))
3584 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3586 if (GET_CODE (PATTERN (insn)) == SET
3587 || GET_CODE (PATTERN (insn)) == CLOBBER)
3588 count_one_set (insn, PATTERN (insn), may_not_move, last_set);
3589 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3591 register int i;
3592 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3593 count_one_set (insn, XVECEXP (PATTERN (insn), 0, i),
3594 may_not_move, last_set);
3598 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3599 bzero ((char *) last_set, nregs * sizeof (rtx));
3601 *count_ptr = count;
3604 /* Given a loop that is bounded by LOOP_START and LOOP_END
3605 and that is entered at SCAN_START,
3606 return 1 if the register set in SET contained in insn INSN is used by
3607 any insn that precedes INSN in cyclic order starting
3608 from the loop entry point.
3610 We don't want to use INSN_LUID here because if we restrict INSN to those
3611 that have a valid INSN_LUID, it means we cannot move an invariant out
3612 from an inner loop past two loops. */
3614 static int
3615 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3616 rtx set, insn, loop_start, scan_start, loop_end;
3618 rtx reg = SET_DEST (set);
3619 rtx p;
3621 /* Scan forward checking for register usage. If we hit INSN, we
3622 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3623 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3625 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3626 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3627 return 1;
3629 if (p == loop_end)
3630 p = loop_start;
3633 return 0;
3636 /* A "basic induction variable" or biv is a pseudo reg that is set
3637 (within this loop) only by incrementing or decrementing it. */
3638 /* A "general induction variable" or giv is a pseudo reg whose
3639 value is a linear function of a biv. */
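/* For example, in

       for (i = 0; i < n; i++)
         sum += a[i];

   the counter `i' is a biv, and the address of `a[i]' (roughly
   `a + i * sizeof (*a)') is a giv, being a linear function of `i'.  */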
3641 /* Bivs are recognized by `basic_induction_var';
3642 Givs by `general_induction_var'. */
3644 /* Indexed by register number, indicates whether or not register is an
3645 induction variable, and if so what type. */
3647 varray_type reg_iv_type;
3649 /* Indexed by register number, contains pointer to `struct induction'
3650 if register is an induction variable. This holds general info for
3651 all induction variables. */
3653 varray_type reg_iv_info;
3655 /* Indexed by register number, contains pointer to `struct iv_class'
3656 if register is a basic induction variable. This holds info describing
3657 the class (a related group) of induction variables that the biv belongs
3658 to. */
3660 struct iv_class **reg_biv_class;
3662 /* The head of a list which links together (via the next field)
3663 every iv class for the current loop. */
3665 struct iv_class *loop_iv_list;
3667 /* Givs made from biv increments are always splittable for loop unrolling.
3668 Since there is no regscan info for them, we have to keep track of them
3669 separately. */
3670 int first_increment_giv, last_increment_giv;
3672 /* Communication with routines called via `note_stores'. */
3674 static rtx note_insn;
3676 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3678 static rtx addr_placeholder;
3680 /* ??? Unfinished optimizations, and possible future optimizations,
3681 for the strength reduction code. */
3683 /* ??? The interaction of biv elimination, and recognition of 'constant'
3684 bivs, may cause problems. */
3686 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3687 performance problems.
3689 Perhaps don't eliminate things that can be combined with an addressing
3690 mode. Find all givs that have the same biv, mult_val, and add_val;
3691 then for each giv, check to see if its only use dies in a following
3692 memory address. If so, generate a new memory address and check to see
3693 if it is valid. If it is valid, then store the modified memory address,
3694 otherwise, mark the giv as not done so that it will get its own iv. */
3696 /* ??? Could try to optimize branches when it is known that a biv is always
3697 positive. */
3699 /* ??? When replacing a biv in a compare insn, we should replace with closest
3700 giv so that an optimized branch can still be recognized by the combiner,
3701 e.g. the VAX acb insn. */
3703 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3704 was rerun in loop_optimize whenever a register was added or moved.
3705 Also, some of the optimizations could be a little less conservative. */
3707 /* Perform strength reduction and induction variable elimination.
3709 Pseudo registers created during this function will be beyond the last
3710 valid index in several tables including n_times_set and regno_last_uid.
3711 This does not cause a problem here, because the added registers cannot be
3712 givs outside of their loop, and hence will never be reconsidered.
3713 But scan_loop must check regnos to make sure they are in bounds.
3715 SCAN_START is the first instruction in the loop, as the loop would
3716 actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
3717 the first instruction in the loop, as it is laid out in the
3718 instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG.
3719 LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
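/* Schematically, for a loop entered at the bottom (a "rotated" loop),
   the insn stream looks roughly like this:

       NOTE_INSN_LOOP_BEG        <- LOOP_START
       (jump to test)
       top:                      <- LOOP_TOP
         ...body...
       NOTE_INSN_LOOP_CONT       <- LOOP_CONT
       test:                     <- SCAN_START, first insn as executed
       (conditional jump to top)
       NOTE_INSN_LOOP_END        <- END

   LOOP_TOP is only set for such rotated loops.  */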
3721 static void
3722 strength_reduce (scan_start, end, loop_top, insn_count,
3723 loop_start, loop_end, loop_info, loop_cont, unroll_p, bct_p)
3724 rtx scan_start;
3725 rtx end;
3726 rtx loop_top;
3727 int insn_count;
3728 rtx loop_start;
3729 rtx loop_end;
3730 struct loop_info *loop_info;
3731 rtx loop_cont;
3732 int unroll_p, bct_p ATTRIBUTE_UNUSED;
3734 rtx p;
3735 rtx set;
3736 rtx inc_val;
3737 rtx mult_val;
3738 rtx dest_reg;
3739 rtx *location;
3740 /* This is 1 if current insn is not executed at least once for every loop
3741 iteration. */
3742 int not_every_iteration = 0;
3743 /* This is 1 if current insn may be executed more than once for every
3744 loop iteration. */
3745 int maybe_multiple = 0;
3746 /* This is 1 if we have passed a branch back to the top of the loop
3747 (aka a loop latch). */
3748 int past_loop_latch = 0;
3749 /* Temporary list pointers for traversing loop_iv_list. */
3750 struct iv_class *bl, **backbl;
3751 /* Ratio of extra register life span we can justify
3752 for saving an instruction. More if loop doesn't call subroutines
3753 since in that case saving an insn makes more difference
3754 and more registers are available. */
3755 /* ??? could set this to last value of threshold in move_movables */
3756 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3757 /* Map of pseudo-register replacements. */
3758 rtx *reg_map;
3759 int reg_map_size;
3760 int call_seen;
3761 rtx test;
3762 rtx end_insert_before;
3763 int loop_depth = 0;
3764 int n_extra_increment;
3765 int unrolled_insn_copies = 0;
3767 /* If scan_start points to the loop exit test, we have to be wary of
3768 subversive use of gotos inside expression statements. */
3769 if (prev_nonnote_insn (scan_start) != prev_nonnote_insn (loop_start))
3770 maybe_multiple = back_branch_in_range_p (scan_start, loop_start, loop_end);
3772 VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
3773 VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
3774 reg_biv_class = (struct iv_class **)
3775 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3776 bzero ((char *) reg_biv_class, (max_reg_before_loop
3777 * sizeof (struct iv_class *)));
3779 loop_iv_list = 0;
3780 addr_placeholder = gen_reg_rtx (Pmode);
3782 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3783 must be put before this insn, so that they will appear in the right
3784 order (i.e. loop order).
3786 If loop_end is the end of the current function, then emit a
3787 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3788 dummy note insn. */
3789 if (NEXT_INSN (loop_end) != 0)
3790 end_insert_before = NEXT_INSN (loop_end);
3791 else
3792 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3794 /* Scan through loop to find all possible bivs. */
3796 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
3797 p != NULL_RTX;
3798 p = next_insn_in_loop (p, scan_start, end, loop_top))
3800 if (GET_CODE (p) == INSN
3801 && (set = single_set (p))
3802 && GET_CODE (SET_DEST (set)) == REG)
3804 dest_reg = SET_DEST (set);
3805 if (REGNO (dest_reg) < max_reg_before_loop
3806 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3807 && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT)
3809 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3810 dest_reg, p, &inc_val, &mult_val,
3811 &location))
3813 /* It is a possible basic induction variable.
3814 Create and initialize an induction structure for it. */
3816 struct induction *v
3817 = (struct induction *) alloca (sizeof (struct induction));
3819 record_biv (v, p, dest_reg, inc_val, mult_val, location,
3820 not_every_iteration, maybe_multiple);
3821 REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT;
3823 else if (REGNO (dest_reg) < max_reg_before_loop)
3824 REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT;
3828 /* Past CODE_LABEL, we get to insns that may be executed multiple
3829 times. The only way we can be sure that they can't is if every
3830 jump insn between here and the end of the loop either
3831 returns, exits the loop, is a jump to a location that is still
3832 behind the label, or is a jump to the loop start. */
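/* For instance, with

       inner:  ...
       p:      ...                      <- the CODE_LABEL just passed
               ...
               if (cond) goto inner;    <- backward jump over `p'

   the insns after `p' can execute several times within one iteration
   of this loop, so MAYBE_MULTIPLE must be set.  */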
3834 if (GET_CODE (p) == CODE_LABEL)
3836 rtx insn = p;
3838 maybe_multiple = 0;
3840 while (1)
3842 insn = NEXT_INSN (insn);
3843 if (insn == scan_start)
3844 break;
3845 if (insn == end)
3847 if (loop_top != 0)
3848 insn = loop_top;
3849 else
3850 break;
3851 if (insn == scan_start)
3852 break;
3855 if (GET_CODE (insn) == JUMP_INSN
3856 && GET_CODE (PATTERN (insn)) != RETURN
3857 && (! condjump_p (insn)
3858 || (JUMP_LABEL (insn) != 0
3859 && JUMP_LABEL (insn) != scan_start
3860 && ! loop_insn_first_p (p, JUMP_LABEL (insn)))))
3862 maybe_multiple = 1;
3863 break;
3868 /* Past a jump, we get to insns for which we can't count
3869 on whether they will be executed during each iteration. */
3870 /* This code appears twice in strength_reduce. There is also similar
3871 code in scan_loop. */
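/* For example, in

       for (i = 0; i < n; i++)
         if (c[i])
           q = i;

   the set of `q' is only reached on some iterations; it comes after a
   conditional jump within the loop, so NOT_EVERY_ITERATION is set for it.  */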
3872 if (GET_CODE (p) == JUMP_INSN
3873 /* If we enter the loop in the middle, and scan around to the
3874 beginning, don't set not_every_iteration for that.
3875 This can be any kind of jump, since we want to know if insns
3876 will be executed if the loop is executed. */
3877 && ! (JUMP_LABEL (p) == loop_top
3878 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3879 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3881 rtx label = 0;
3883 /* If this is a jump outside the loop, then it also doesn't
3884 matter. Check to see if the target of this branch is on the
3885 loop_number_exit_labels list. */
3887 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3888 label;
3889 label = LABEL_NEXTREF (label))
3890 if (XEXP (label, 0) == JUMP_LABEL (p))
3891 break;
3893 if (! label)
3894 not_every_iteration = 1;
3897 else if (GET_CODE (p) == NOTE)
3899 /* At the virtual top of a converted loop, insns are again known to
3900 be executed each iteration: logically, the loop begins here
3901 even though the exit code has been duplicated.
3903 Insns are also again known to be executed each iteration at
3904 the LOOP_CONT note. */
3905 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3906 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3907 && loop_depth == 0)
3908 not_every_iteration = 0;
3909 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3910 loop_depth++;
3911 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3912 loop_depth--;
3915 /* Note if we pass a loop latch. If we do, then we cannot clear
3916 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3917 a loop since a jump before the last CODE_LABEL may have started
3918 a new loop iteration.
3920 Note that LOOP_TOP is only set for rotated loops and we need
3921 this check for all loops, so compare against the CODE_LABEL
3922 which immediately follows LOOP_START. */
3923 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == NEXT_INSN (loop_start))
3924 past_loop_latch = 1;
3926 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3927 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3928 or not an insn is known to be executed each iteration of the
3929 loop, whether or not any iterations are known to occur.
3931 Therefore, if we have just passed a label and have no more labels
3932 between here and the test insn of the loop, and we have not passed
3933 a jump to the top of the loop, then we know these insns will be
3934 executed each iteration. */
3936 if (not_every_iteration
3937 && ! past_loop_latch
3938 && GET_CODE (p) == CODE_LABEL
3939 && no_labels_between_p (p, loop_end)
3940 && loop_insn_first_p (p, loop_cont))
3941 not_every_iteration = 0;
3944 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3945 Make a sanity check against n_times_set. */
3946 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3948 if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3949 /* Above happens if register modified by subreg, etc. */
3950 /* Make sure it is not recognized as a basic induction var: */
3951 || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
3952 /* If never incremented, it is invariant that we decided not to
3953 move. So leave it alone. */
3954 || ! bl->incremented)
3956 if (loop_dump_stream)
3957 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3958 bl->regno,
3959 (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3960 ? "not induction variable"
3961 : (! bl->incremented ? "never incremented"
3962 : "count error")));
3964 REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT;
3965 *backbl = bl->next;
3967 else
3969 backbl = &bl->next;
3971 if (loop_dump_stream)
3972 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3976 /* Exit if there are no bivs. */
3977 if (! loop_iv_list)
3979 /* Can still unroll the loop anyway, but indicate that there is no
3980 strength reduction info available. */
3981 if (unroll_p)
3982 unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
3983 loop_info, 0);
3985 goto egress;
3988 /* Find initial value for each biv by searching backwards from loop_start,
3989 halting at first label. Also record any test condition. */
3991 call_seen = 0;
3992 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3994 note_insn = p;
3996 if (GET_CODE (p) == CALL_INSN)
3997 call_seen = 1;
3999 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4000 || GET_CODE (p) == CALL_INSN)
4001 note_stores (PATTERN (p), record_initial, NULL);
4003 /* Record any test of a biv that branches around the loop if no store
4004 between it and the start of loop. We only care about tests with
4005 constants and registers and only certain of those. */
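/* For example, if the jump guarding the loop is

       if (i != 0) goto past_loop;

   then whenever the loop is entered we know i == 0, so an NE test
   supplies an initial value; any other comparison (say i >= 0) only
   supplies an initial test.  */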
4006 if (GET_CODE (p) == JUMP_INSN
4007 && JUMP_LABEL (p) != 0
4008 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
4009 && (test = get_condition_for_loop (p)) != 0
4010 && GET_CODE (XEXP (test, 0)) == REG
4011 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4012 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
4013 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
4014 && bl->init_insn == 0)
4016 /* If an NE test, we have an initial value! */
4017 if (GET_CODE (test) == NE)
4019 bl->init_insn = p;
4020 bl->init_set = gen_rtx_SET (VOIDmode,
4021 XEXP (test, 0), XEXP (test, 1));
4023 else
4024 bl->initial_test = test;
4028 /* Look at each biv and see if we can say anything better about its
4029 initial value from any initializing insns set up above. (This is done
4030 in two passes to avoid missing SETs in a PARALLEL.) */
4031 for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next)
4033 rtx src;
4034 rtx note;
4036 if (! bl->init_insn)
4037 continue;
4039 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4040 is a constant, use the value of that. */
4041 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4042 && CONSTANT_P (XEXP (note, 0)))
4043 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4044 && CONSTANT_P (XEXP (note, 0))))
4045 src = XEXP (note, 0);
4046 else
4047 src = SET_SRC (bl->init_set);
4049 if (loop_dump_stream)
4050 fprintf (loop_dump_stream,
4051 "Biv %d initialized at insn %d: initial value ",
4052 bl->regno, INSN_UID (bl->init_insn));
4054 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4055 || GET_MODE (src) == VOIDmode)
4056 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
4058 bl->initial_value = src;
4060 if (loop_dump_stream)
4062 if (GET_CODE (src) == CONST_INT)
4064 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
4065 fputc ('\n', loop_dump_stream);
4067 else
4069 print_rtl (loop_dump_stream, src);
4070 fprintf (loop_dump_stream, "\n");
4074 else
4076 struct iv_class *bl2 = 0;
4077 rtx increment = NULL_RTX;
4079 /* Biv initial value is not a simple move. If it is the sum of
4080 another biv and a constant, check if both bivs are incremented
4081 in lockstep. Then we are actually looking at a giv.
4082 For simplicity, we only handle the case where there is but a
4083 single increment, and the register is not used elsewhere. */
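/* For example:

       b = b2 + 4;
       loop:
         ...
         b2 += n;  b += n;     <- single increments, in lockstep

   Here `b' is always `b2 + 4' within the loop, i.e. really a giv
   of the biv `b2'.  */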
4084 if (bl->biv_count == 1
4085 && bl->regno < max_reg_before_loop
4086 && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4087 && GET_CODE (src) == PLUS
4088 && GET_CODE (XEXP (src, 0)) == REG
4089 && CONSTANT_P (XEXP (src, 1))
4090 && ((increment = biv_total_increment (bl, loop_start, loop_end))
4091 != NULL_RTX))
4093 int regno = REGNO (XEXP (src, 0));
4095 for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
4096 if (bl2->regno == regno)
4097 break;
4100 /* Now, can we transform this biv into a giv? */
4101 if (bl2
4102 && bl2->biv_count == 1
4103 && rtx_equal_p (increment,
4104 biv_total_increment (bl2, loop_start, loop_end))
4105 /* init_insn is only set to insns that are before loop_start
4106 without any intervening labels. */
4107 && ! reg_set_between_p (bl2->biv->src_reg,
4108 PREV_INSN (bl->init_insn), loop_start)
4109 /* The register from BL2 must be set before the register from
4110 BL is set, or we must be able to move the latter set after
4111 the former set. Currently there can't be any labels
4112 in-between when biv_total_increment returns nonzero both times
4113 but we test it here in case some day some real cfg analysis
4114 gets used to set always_computable. */
4115 && (loop_insn_first_p (bl2->biv->insn, bl->biv->insn)
4116 ? no_labels_between_p (bl2->biv->insn, bl->biv->insn)
4117 : (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn,
4118 bl2->biv->insn)
4119 && no_jumps_between_p (bl->biv->insn, bl2->biv->insn)))
4120 && validate_change (bl->biv->insn,
4121 &SET_SRC (single_set (bl->biv->insn)),
4122 copy_rtx (src), 0))
4124 int loop_num = uid_loop_num[INSN_UID (loop_start)];
4125 rtx dominator = loop_number_cont_dominator[loop_num];
4126 rtx giv = bl->biv->src_reg;
4127 rtx giv_insn = bl->biv->insn;
4128 rtx after_giv = NEXT_INSN (giv_insn);
4130 if (loop_dump_stream)
4131 fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno);
4132 /* Let this giv be discovered by the generic code. */
4133 REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT;
4134 reg_biv_class[bl->regno] = NULL_PTR;
4135 /* We can get better optimization if we can move the giv setting
4136 before the first giv use. */
4137 if (dominator
4138 && ! loop_insn_first_p (dominator, scan_start)
4139 && ! reg_set_between_p (bl2->biv->src_reg, loop_start,
4140 dominator)
4141 && ! reg_used_between_p (giv, loop_start, dominator)
4142 && ! reg_used_between_p (giv, giv_insn, loop_end))
4144 rtx p;
4145 rtx next;
4147 for (next = NEXT_INSN (dominator); ; next = NEXT_INSN (next))
4149 if ((GET_RTX_CLASS (GET_CODE (next)) == 'i'
4150 && (reg_mentioned_p (giv, PATTERN (next))
4151 || reg_set_p (bl2->biv->src_reg, next)))
4152 || GET_CODE (next) == JUMP_INSN)
4153 break;
4154 #ifdef HAVE_cc0
4155 if (GET_RTX_CLASS (GET_CODE (next)) != 'i'
4156 || ! sets_cc0_p (PATTERN (next)))
4157 #endif
4158 dominator = next;
4160 if (loop_dump_stream)
4161 fprintf (loop_dump_stream, "move after insn %d\n",
4162 INSN_UID (dominator));
4163 /* Avoid problems with luids by actually moving the insn
4164 and adjusting all luids in the range. */
4165 reorder_insns (giv_insn, giv_insn, dominator);
4166 for (p = dominator; INSN_UID (p) >= max_uid_for_loop; )
4167 p = PREV_INSN (p);
4168 compute_luids (giv_insn, after_giv, INSN_LUID (p));
4169 /* If the only purpose of the init insn is to initialize
4170 this giv, delete it. */
4171 if (single_set (bl->init_insn)
4172 && ! reg_used_between_p (giv, bl->init_insn, loop_start))
4173 delete_insn (bl->init_insn);
4175 else if (! loop_insn_first_p (bl2->biv->insn, bl->biv->insn))
4177 rtx p = PREV_INSN (giv_insn);
4178 while (INSN_UID (p) >= max_uid_for_loop)
4179 p = PREV_INSN (p);
4180 reorder_insns (giv_insn, giv_insn, bl2->biv->insn);
4181 compute_luids (after_giv, NEXT_INSN (giv_insn),
4182 INSN_LUID (p));
4184 /* Remove this biv from the chain. */
4185 if (bl->next)
4187 /* We move the following giv from *bl->next into *bl.
4188 We have to update reg_biv_class for that moved biv
4189 to point to its new address. */
4190 *bl = *bl->next;
4191 reg_biv_class[bl->regno] = bl;
4193 else
4195 *backbl = 0;
4196 break;
4200 /* If we can't make it a giv,
4201 let biv keep initial value of "itself". */
4202 else if (loop_dump_stream)
4203 fprintf (loop_dump_stream, "is complex\n");
4207 /* If a biv is unconditionally incremented several times in a row, convert
4208 all but the last increment into a giv. */
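/* For example, a biv incremented twice per iteration,

       i = i + 4;  ...uses of i...  i = i + 4;

   becomes

       t = i + 4;  ...uses of t...  i = i + 8;

   where the new register `t' is a giv and only the last increment
   still sets the biv.  */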
4210 /* Get an upper bound for the number of registers
4211 we might have after all bivs have been processed. */
4212 first_increment_giv = max_reg_num ();
4213 for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next)
4214 n_extra_increment += bl->biv_count - 1;
4216 /* If the loop contains volatile memory references, do not allow any
4217 replacements to take place, since this could lose the volatile markers. */
4218 if (n_extra_increment && ! loop_info->has_volatile)
4220 int nregs = first_increment_giv + n_extra_increment;
4222 /* Reallocate reg_iv_type and reg_iv_info. */
4223 VARRAY_GROW (reg_iv_type, nregs);
4224 VARRAY_GROW (reg_iv_info, nregs);
4226 for (bl = loop_iv_list; bl; bl = bl->next)
4228 struct induction **vp, *v, *next;
4229 int biv_dead_after_loop = 0;
4231 /* The biv increments lists are in reverse order. Fix this first. */
4232 for (v = bl->biv, bl->biv = 0; v; v = next)
4234 next = v->next_iv;
4235 v->next_iv = bl->biv;
4236 bl->biv = v;
4239 /* We must guard against the case that an early exit between v->insn
4240 and next->insn leaves the biv live after the loop, since that
4241 would mean that we'd be missing an increment for the final
4242 value. The following test to set biv_dead_after_loop is like
4243 the first part of the test to set bl->eliminable.
4244 We don't check here if we can calculate the final value, since
4245 this can't succeed if we already know that there is a jump
4246 between v->insn and next->insn, yet next->always_executed is
4247 set and next->maybe_multiple is cleared. Such a combination
4248 implies that the jump destination is outside the loop.
4249 If we want to make this check more sophisticated, we should
4250 check each branch between v->insn and next->insn individually
4251 to see if the biv is dead at its destination. */
4253 if (uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4254 && bl->init_insn
4255 && INSN_UID (bl->init_insn) < max_uid_for_loop
4256 && (uid_luid[REGNO_FIRST_UID (bl->regno)]
4257 >= INSN_LUID (bl->init_insn))
4258 #ifdef HAVE_decrement_and_branch_until_zero
4259 && ! bl->nonneg
4260 #endif
4261 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4262 biv_dead_after_loop = 1;
4264 for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;)
4266 HOST_WIDE_INT offset;
4267 rtx set, add_val, old_reg, dest_reg, last_use_insn, note;
4268 int old_regno, new_regno;
4270 if (! v->always_executed
4271 || v->maybe_multiple
4272 || GET_CODE (v->add_val) != CONST_INT
4273 || ! next->always_executed
4274 || next->maybe_multiple
4275 || ! CONSTANT_P (next->add_val)
4276 || v->mult_val != const1_rtx
4277 || next->mult_val != const1_rtx
4278 || ! (biv_dead_after_loop
4279 || no_jumps_between_p (v->insn, next->insn)))
4281 vp = &v->next_iv;
4282 continue;
4284 offset = INTVAL (v->add_val);
4285 set = single_set (v->insn);
4286 add_val = plus_constant (next->add_val, offset);
4287 old_reg = v->dest_reg;
4288 dest_reg = gen_reg_rtx (v->mode);
4290 /* Unlike reg_iv_type / reg_iv_info, the other three arrays
4291 have been allocated with some slop space, so we may not
4292 actually need to reallocate them. If we do, the following
4293 if statement will be executed just once in this loop. */
4294 if ((unsigned) max_reg_num () > n_times_set->num_elements)
4296 /* Grow all the remaining arrays. */
4297 VARRAY_GROW (set_in_loop, nregs);
4298 VARRAY_GROW (n_times_set, nregs);
4299 VARRAY_GROW (may_not_optimize, nregs);
4300 VARRAY_GROW (reg_single_usage, nregs);
4303 if (! validate_change (next->insn, next->location, add_val, 0))
4305 vp = &v->next_iv;
4306 continue;
4309 /* Here we can try to eliminate the increment by combining
4310 it into the uses. */
4312 /* Set last_use_insn so that we can check against it. */
4314 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4315 p != next->insn;
4316 p = next_insn_in_loop (p, scan_start, end, loop_top))
4318 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
4319 continue;
4320 if (reg_mentioned_p (old_reg, PATTERN (p)))
4322 last_use_insn = p;
4326 /* If we can't get the LUIDs for the insns, we can't
4327 calculate the lifetime. This is likely from unrolling
4328 of an inner loop, so there is little point in making this
4329 a DEST_REG giv anyway. */
4330 if (INSN_UID (v->insn) >= max_uid_for_loop
4331 || INSN_UID (last_use_insn) >= max_uid_for_loop
4332 || ! validate_change (v->insn, &SET_DEST (set), dest_reg, 0))
4334 /* Change the increment at NEXT back to what it was. */
4335 if (! validate_change (next->insn, next->location,
4336 next->add_val, 0))
4337 abort ();
4338 vp = &v->next_iv;
4339 continue;
4341 next->add_val = add_val;
4342 v->dest_reg = dest_reg;
4343 v->giv_type = DEST_REG;
4344 v->location = &SET_SRC (set);
4345 v->cant_derive = 0;
4346 v->combined_with = 0;
4347 v->maybe_dead = 0;
4348 v->derive_adjustment = 0;
4349 v->same = 0;
4350 v->ignore = 0;
4351 v->new_reg = 0;
4352 v->final_value = 0;
4353 v->same_insn = 0;
4354 v->auto_inc_opt = 0;
4355 v->unrolled = 0;
4356 v->shared = 0;
4357 v->derived_from = 0;
4358 v->always_computable = 1;
4359 v->always_executed = 1;
4360 v->replaceable = 1;
4361 v->no_const_addval = 0;
4363 old_regno = REGNO (old_reg);
4364 new_regno = REGNO (dest_reg);
4365 VARRAY_INT (set_in_loop, old_regno)--;
4366 VARRAY_INT (set_in_loop, new_regno) = 1;
4367 VARRAY_INT (n_times_set, old_regno)--;
4368 VARRAY_INT (n_times_set, new_regno) = 1;
4369 VARRAY_CHAR (may_not_optimize, new_regno) = 0;
4371 REG_IV_TYPE (new_regno) = GENERAL_INDUCT;
4372 REG_IV_INFO (new_regno) = v;
4374 /* If next_insn has a REG_EQUAL note that mentions OLD_REG,
4375 it must be replaced. */
4376 note = find_reg_note (next->insn, REG_EQUAL, NULL_RTX);
4377 if (note && reg_mentioned_p (old_reg, XEXP (note, 0)))
4378 XEXP (note, 0) = copy_rtx (SET_SRC (single_set (next->insn)));
4380 /* Remove the increment from the list of biv increments,
4381 and record it as a giv. */
4382 *vp = next;
4383 bl->biv_count--;
4384 v->next_iv = bl->giv;
4385 bl->giv = v;
4386 bl->giv_count++;
4387 v->benefit = rtx_cost (SET_SRC (set), SET);
4388 bl->total_benefit += v->benefit;
4390 /* Now replace the biv with DEST_REG in all insns between
4391 the replaced increment and the next increment, and
4392 remember the last insn that needed a replacement. */
4393 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4394 p != next->insn;
4395 p = next_insn_in_loop (p, scan_start, end, loop_top))
4397 rtx note;
4399 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
4400 continue;
4401 if (reg_mentioned_p (old_reg, PATTERN (p)))
4403 last_use_insn = p;
4404 if (! validate_replace_rtx (old_reg, dest_reg, p))
4405 abort ();
4407 for (note = REG_NOTES (p); note; note = XEXP (note, 1))
4409 if (GET_CODE (note) == EXPR_LIST)
4410 XEXP (note, 0)
4411 = replace_rtx (XEXP (note, 0), old_reg, dest_reg);
4415 v->last_use = last_use_insn;
4416 v->lifetime = INSN_LUID (v->insn) - INSN_LUID (last_use_insn);
4417 /* If the lifetime is zero, it means that this register is really
4418 a dead store. So mark this as a giv that can be ignored.
4419 This will not prevent the biv from being eliminated. */
4420 if (v->lifetime == 0)
4421 v->ignore = 1;
4423 if (loop_dump_stream)
4424 fprintf (loop_dump_stream,
4425 "Increment %d of biv %d converted to giv %d.\n\n",
4426 INSN_UID (v->insn), old_regno, new_regno);
4430 last_increment_giv = max_reg_num () - 1;
4432 /* Search the loop for general induction variables. */
4434 /* A register is a giv if: it is only set once, it is a function of a
4435 biv and a constant (or invariant), and it is not a biv. */
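/* For example, with biv `i',

       j = i * 4 + base;

   makes `j' a giv with src_reg `i', mult_val 4 and add_val `base',
   provided this is the only place `j' is set.  */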
4437 not_every_iteration = 0;
4438 loop_depth = 0;
4439 maybe_multiple = 0;
4440 p = scan_start;
4441 while (1)
4443 p = NEXT_INSN (p);
4444 /* At end of a straight-in loop, we are done.
4445 At end of a loop entered at the bottom, scan the top. */
4446 if (p == scan_start)
4447 break;
4448 if (p == end)
4450 if (loop_top != 0)
4451 p = loop_top;
4452 else
4453 break;
4454 if (p == scan_start)
4455 break;
4458 /* Look for a general induction variable in a register. */
4459 if (GET_CODE (p) == INSN
4460 && (set = single_set (p))
4461 && GET_CODE (SET_DEST (set)) == REG
4462 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
4464 rtx src_reg;
4465 rtx add_val;
4466 rtx mult_val;
4467 int benefit;
4468 rtx regnote = 0;
4469 rtx last_consec_insn;
4471 dest_reg = SET_DEST (set);
4472 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4473 continue;
4475 if (/* SET_SRC is a giv. */
4476 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
4477 &mult_val, 0, &benefit)
4478 /* Equivalent expression is a giv. */
4479 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4480 && general_induction_var (XEXP (regnote, 0), &src_reg,
4481 &add_val, &mult_val, 0,
4482 &benefit)))
4483 /* Don't try to handle any regs made by loop optimization.
4484 We have nothing on them in regno_first_uid, etc. */
4485 && REGNO (dest_reg) < max_reg_before_loop
4486 /* Don't recognize a BASIC_INDUCT_VAR here. */
4487 && dest_reg != src_reg
4488 /* This must be the only place where the register is set. */
4489 && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
4490 /* or all sets must be consecutive and make a giv. */
4491 || (benefit = consec_sets_giv (benefit, p,
4492 src_reg, dest_reg,
4493 &add_val, &mult_val,
4494 &last_consec_insn))))
4496 struct induction *v
4497 = (struct induction *) alloca (sizeof (struct induction));
4499 /* If this is a library call, increase benefit. */
4500 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4501 benefit += libcall_benefit (p);
4503 /* Skip the consecutive insns, if there are any. */
4504 if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
4505 p = last_consec_insn;
4507 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
4508 DEST_REG, not_every_iteration, maybe_multiple,
4509 NULL_PTR, loop_start, loop_end);
4514 #ifndef DONT_REDUCE_ADDR
4515 /* Look for givs which are memory addresses. */
4516 /* This resulted in worse code on a VAX 8600. I wonder if it
4517 still does. */
4518 if (GET_CODE (p) == INSN)
4519 find_mem_givs (PATTERN (p), p, not_every_iteration, maybe_multiple,
4520 loop_start, loop_end);
4521 #endif
4523 /* Update the status of whether giv can derive other givs. This can
4524 change when we pass a label or an insn that updates a biv. */
4525 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4526 || GET_CODE (p) == CODE_LABEL)
4527 update_giv_derive (p);
4529 /* Past CODE_LABEL, we get to insns that may be executed multiple
4530 times. The only way we can be sure that they can't is if every
4531 jump insn between here and the end of the loop either
4532 returns, exits the loop, is a forward jump, or is a jump
4533 to the loop start. */
4535 if (GET_CODE (p) == CODE_LABEL)
4537 rtx insn = p;
4539 maybe_multiple = 0;
4541 while (1)
4543 insn = NEXT_INSN (insn);
4544 if (insn == scan_start)
4545 break;
4546 if (insn == end)
4548 if (loop_top != 0)
4549 insn = loop_top;
4550 else
4551 break;
4552 if (insn == scan_start)
4553 break;
4556 if (GET_CODE (insn) == JUMP_INSN
4557 && GET_CODE (PATTERN (insn)) != RETURN
4558 && (! condjump_p (insn)
4559 || (JUMP_LABEL (insn) != 0
4560 && JUMP_LABEL (insn) != scan_start
4561 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
4562 || INSN_UID (insn) >= max_uid_for_loop
4563 || (INSN_LUID (JUMP_LABEL (insn))
4564 < INSN_LUID (insn))))))
4566 maybe_multiple = 1;
4567 break;
4572 /* Past a jump, we get to insns for which we can't count
4573 on whether they will be executed during each iteration. */
4574 /* This code appears twice in strength_reduce. There is also similar
4575 code in scan_loop. */
4576 if (GET_CODE (p) == JUMP_INSN
4577 /* If we enter the loop in the middle, and scan around to the
4578 beginning, don't set not_every_iteration for that.
4579 This can be any kind of jump, since we want to know if insns
4580 will be executed if the loop is executed. */
4581 && ! (JUMP_LABEL (p) == loop_top
4582 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
4583 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
4585 rtx label = 0;
4587 /* If this is a jump outside the loop, then it also doesn't
4588 matter. Check to see if the target of this branch is on the
4589 loop_number_exit_labels list. */
4591 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
4592 label;
4593 label = LABEL_NEXTREF (label))
4594 if (XEXP (label, 0) == JUMP_LABEL (p))
4595 break;
4597 if (! label)
4598 not_every_iteration = 1;
4601 else if (GET_CODE (p) == NOTE)
4603 /* At the virtual top of a converted loop, insns are again known to
4604 be executed each iteration: logically, the loop begins here
4605 even though the exit code has been duplicated.
4607 Insns are also again known to be executed each iteration at
4608 the LOOP_CONT note. */
4609 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4610 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4611 && loop_depth == 0)
4612 not_every_iteration = 0;
4613 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4614 loop_depth++;
4615 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4616 loop_depth--;
4619 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4620 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4621 or not an insn is known to be executed each iteration of the
4622 loop, whether or not any iterations are known to occur.
4624 Therefore, if we have just passed a label and have no more labels
4625 between here and the test insn of the loop, we know these insns
4626 will be executed each iteration. */
4628 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
4629 && no_labels_between_p (p, loop_end)
4630 && loop_insn_first_p (p, loop_cont))
4631 not_every_iteration = 0;
4634 /* Try to calculate and save the number of loop iterations. This is
4635 set to zero if the actual number cannot be calculated. This must
4636 be called after all giv's have been identified, since otherwise it may
4637 fail if the iteration variable is a giv. */
4639 loop_iterations (loop_start, loop_end, loop_info);
4641 /* Now for each giv for which we still don't know whether or not it is
4642 replaceable, check to see if it is replaceable because its final value
4643 can be calculated. This must be done after loop_iterations is called,
4644 so that final_giv_value will work correctly. */
4646 for (bl = loop_iv_list; bl; bl = bl->next)
4648 struct induction *v;
4650 for (v = bl->giv; v; v = v->next_iv)
4651 if (! v->replaceable && ! v->not_replaceable)
4652 check_final_value (v, loop_start, loop_end, loop_info->n_iterations);
4655 /* Try to prove that the loop counter variable (if any) is always
4656 nonnegative; if so, record that fact with a REG_NONNEG note
4657 so that "decrement and branch until zero" insn can be used. */
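/* For example, a loop

       for (i = n; i > 0; i--) ...

   whose counter never goes negative can have its end test replaced
   by such a decrement-and-branch instruction.  */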
4658 check_dbra_loop (loop_end, insn_count, loop_start, loop_info);
4660 /* Create reg_map to hold substitutions for replaceable giv regs.
4661 Some givs might have been made from biv increments, so look at
4662 reg_iv_type for a suitable size. */
4663 reg_map_size = reg_iv_type->num_elements;
4664 reg_map = (rtx *) alloca (reg_map_size * sizeof (rtx));
4665 bzero ((char *) reg_map, reg_map_size * sizeof (rtx));
4667 /* Examine each iv class for feasibility of strength reduction/induction
4668 variable elimination. */
4670 for (bl = loop_iv_list; bl; bl = bl->next)
4672 struct induction *v;
4673 int benefit;
4674 int all_reduced;
4675 rtx final_value = 0;
4676 unsigned int nregs;
4678 /* Test whether it will be possible to eliminate this biv
4679 provided all givs are reduced. This is possible if either
4680 the reg is not used outside the loop, or we can compute
4681 what its final value will be.
4683 For architectures with a decrement_and_branch_until_zero insn,
4684 don't do this if we put a REG_NONNEG note on the endtest for
4685 this biv. */
4687 /* Compare against bl->init_insn rather than loop_start.
4688 We aren't concerned with any uses of the biv between
4689 init_insn and loop_start since these won't be affected
4690 by the value of the biv elsewhere in the function, so
4691 long as init_insn doesn't use the biv itself.
4692 March 14, 1989 -- self@bayes.arc.nasa.gov */
4694 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4695 && bl->init_insn
4696 && INSN_UID (bl->init_insn) < max_uid_for_loop
4697 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4698 #ifdef HAVE_decrement_and_branch_until_zero
4699 && ! bl->nonneg
4700 #endif
4701 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4702 || ((final_value = final_biv_value (bl, loop_start, loop_end,
4703 loop_info->n_iterations))
4704 #ifdef HAVE_decrement_and_branch_until_zero
4705 && ! bl->nonneg
4706 #endif
4708 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
4709 threshold, insn_count);
4710 else
4712 if (loop_dump_stream)
4714 fprintf (loop_dump_stream,
4715 "Cannot eliminate biv %d.\n",
4716 bl->regno);
4717 fprintf (loop_dump_stream,
4718 "First use: insn %d, last use: insn %d.\n",
4719 REGNO_FIRST_UID (bl->regno),
4720 REGNO_LAST_UID (bl->regno));
4724 /* Combine all giv's for this iv_class. */
4725 combine_givs (bl);
4727 /* This will be true at the end, if all givs which depend on this
4728 biv have been strength reduced.
4729 We can't (currently) eliminate the biv unless this is so. */
4730 all_reduced = 1;
4732 /* Check each giv in this class to see if we will benefit by reducing
4733 it. Skip giv's combined with others. */
4734 for (v = bl->giv; v; v = v->next_iv)
4736 struct induction *tv;
4738 if (v->ignore || v->same)
4739 continue;
4741 benefit = v->benefit;
4743 /* Reduce benefit if not replaceable, since we will insert
4744 a move-insn to replace the insn that calculates this giv.
4745 Don't do this unless the giv is a user variable, since it
4746 will often be marked non-replaceable because of the duplication
4747 of the exit code outside the loop. In such a case, the copies
4748 we insert are dead and will be deleted. So they don't have
4749 a cost. Similar situations exist. */
4750 /* ??? The new final_[bg]iv_value code does a much better job
4751 of finding replaceable giv's, and hence this code may no longer
4752 be necessary. */
4753 if (! v->replaceable && ! bl->eliminable
4754 && REG_USERVAR_P (v->dest_reg))
4755 benefit -= copy_cost;
4757 /* Decrease the benefit to count the add-insns that we will
4758 insert to increment the reduced reg for the giv. */
4759 benefit -= add_cost * bl->biv_count;
4761 /* Decide whether to strength-reduce this giv or to leave the code
4762 unchanged (recompute it from the biv each time it is used).
4763 This decision can be made independently for each giv. */
4765 #ifdef AUTO_INC_DEC
4766 /* Attempt to guess whether autoincrement will handle some of the
4767 new add insns; if so, increase BENEFIT (undo the subtraction of
4768 add_cost that was done above). */
4769 if (v->giv_type == DEST_ADDR
4770 && GET_CODE (v->mult_val) == CONST_INT)
4772 if (HAVE_POST_INCREMENT
4773 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4774 benefit += add_cost * bl->biv_count;
4775 else if (HAVE_PRE_INCREMENT
4776 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4777 benefit += add_cost * bl->biv_count;
4778 else if (HAVE_POST_DECREMENT
4779 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4780 benefit += add_cost * bl->biv_count;
4781 else if (HAVE_PRE_DECREMENT
4782 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4783 benefit += add_cost * bl->biv_count;
4785 #endif
4787 /* If an insn is not to be strength reduced, then set its ignore
4788 flag, and clear all_reduced. */
4790 /* A giv that depends on a reversed biv must be reduced if it is
4791 used after the loop exit, otherwise, it would have the wrong
4792 value after the loop exit. To make it simple, just reduce all
4793 of such giv's whether or not we know they are used after the loop
4794 exit. */
4796 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4797 && ! bl->reversed )
4799 if (loop_dump_stream)
4800 fprintf (loop_dump_stream,
4801 "giv of insn %d not worth while, %d vs %d.\n",
4802 INSN_UID (v->insn),
4803 v->lifetime * threshold * benefit, insn_count);
4804 v->ignore = 1;
4805 all_reduced = 0;
4807 else
4809 /* Check that we can increment the reduced giv without a
4810 multiply insn. If not, reject it. */
4812 for (tv = bl->biv; tv; tv = tv->next_iv)
4813 if (tv->mult_val == const1_rtx
4814 && ! product_cheap_p (tv->add_val, v->mult_val))
4816 if (loop_dump_stream)
4817 fprintf (loop_dump_stream,
4818 "giv of insn %d: would need a multiply.\n",
4819 INSN_UID (v->insn));
4820 v->ignore = 1;
4821 all_reduced = 0;
4822 break;
4827 /* Check for givs whose first use is their definition and whose
4828 last use is the definition of another giv. If so, it is likely
4829 dead and should not be used to derive another giv nor to
4830 eliminate a biv. */
4831 for (v = bl->giv; v; v = v->next_iv)
4833 if (v->ignore
4834 || (v->same && v->same->ignore))
4835 continue;
4837 if (v->last_use)
4839 struct induction *v1;
4841 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4842 if (v->last_use == v1->insn)
4843 v->maybe_dead = 1;
4845 else if (v->giv_type == DEST_REG
4846 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4848 struct induction *v1;
4850 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4851 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4852 v->maybe_dead = 1;
4856 /* Now that we know which givs will be reduced, try to rearrange the
4857 combinations to reduce register pressure.
4858 recombine_givs calls find_life_end, which needs reg_iv_type and
4859 reg_iv_info to be valid for all pseudos. We do the necessary
4860 reallocation here since it allows us to check if there are still
4861 more bivs to process. */
4862 nregs = max_reg_num ();
4863 if (nregs > reg_iv_type->num_elements)
4865 /* If there are still more bivs to process, allocate some slack
4866 space so that we're not constantly reallocating these arrays. */
4867 if (bl->next)
4868 nregs += nregs / 4;
4869 /* Reallocate reg_iv_type and reg_iv_info. */
4870 VARRAY_GROW (reg_iv_type, nregs);
4871 VARRAY_GROW (reg_iv_info, nregs);
4873 recombine_givs (bl, loop_start, loop_end, unroll_p);
4875 /* Reduce each giv that we decided to reduce. */
4877 for (v = bl->giv; v; v = v->next_iv)
4879 struct induction *tv;
4880 if (! v->ignore && v->same == 0)
4882 int auto_inc_opt = 0;
4884 /* If the code for derived givs immediately below has already
4885 allocated a new_reg, we must keep it. */
4886 if (! v->new_reg)
4887 v->new_reg = gen_reg_rtx (v->mode);
4889 if (v->derived_from)
4891 struct induction *d = v->derived_from;
4893 /* In case d->dest_reg is not replaceable, we have
4894 to replace it in v->insn now. */
4895 if (! d->new_reg)
4896 d->new_reg = gen_reg_rtx (d->mode);
4897 PATTERN (v->insn)
4898 = replace_rtx (PATTERN (v->insn), d->dest_reg, d->new_reg);
4899 PATTERN (v->insn)
4900 = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg);
4901 /* For each place where the biv is incremented, add an
4902 insn to set the new, reduced reg for the giv.
4903 We used to do this only for biv_count != 1, but
4904 this fails when there is a giv after a single biv
4905 increment, e.g. when the last giv was expressed as
4906 pre-decrement. */
4907 for (tv = bl->biv; tv; tv = tv->next_iv)
4909 /* We always emit reduced giv increments before the
4910 biv increment when bl->biv_count != 1. So by
4911 emitting the add insns for derived givs after the
4912 biv increment, they pick up the updated value of
4913 the reduced giv.
4914 If the reduced giv is processed with
4915 auto_inc_opt == 1, then it is incremented earlier
4916 than the biv, hence we'll still pick up the right
4917 value.
4918 If it's processed with auto_inc_opt == -1,
4919 that implies that the biv increment is before the
4920 first reduced giv's use. The derived giv's lifetime
4921 is after the reduced giv's lifetime, hence in this
4922 case, the biv increment doesn't matter. */
4923 emit_insn_after (copy_rtx (PATTERN (v->insn)), tv->insn);
4925 continue;
4928 #ifdef AUTO_INC_DEC
4929 /* If the target has auto-increment addressing modes, and
4930 this is an address giv, then try to put the increment
4931 immediately after its use, so that flow can create an
4932 auto-increment addressing mode. */
4933 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4934 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4935 /* We don't handle reversed biv's because bl->biv->insn
4936 does not have a valid INSN_LUID. */
4937 && ! bl->reversed
4938 && v->always_executed && ! v->maybe_multiple
4939 && INSN_UID (v->insn) < max_uid_for_loop)
4941 /* If other giv's have been combined with this one, then
4942 this will work only if all uses of the other giv's occur
4943 before this giv's insn. This is difficult to check.
4945 We simplify this by looking for the common case where
4946 there is one DEST_REG giv, and this giv's insn is the
4947 last use of the dest_reg of that DEST_REG giv. If the
4948 increment occurs after the address giv, then we can
4949 perform the optimization. (Otherwise, the increment
4950 would have to go before other_giv, and we would not be
4951 able to combine it with the address giv to get an
4952 auto-inc address.) */
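/* For example, if the increment follows the last use of the address,

       r = *p;       <- address giv used here
       ...
       p = p + 4;    <- increment after the use

   flow can later turn this into a post-increment load on targets
   that have such an addressing mode.  */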
4953 if (v->combined_with)
4955 struct induction *other_giv = 0;
4957 for (tv = bl->giv; tv; tv = tv->next_iv)
4958 if (tv->same == v)
4960 if (other_giv)
4961 break;
4962 else
4963 other_giv = tv;
4965 if (! tv && other_giv
4966 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4967 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4968 == INSN_UID (v->insn))
4969 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4970 auto_inc_opt = 1;
4972 /* Check for case where increment is before the address
4973 giv. Do this test in "loop order". */
4974 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4975 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4976 || (INSN_LUID (bl->biv->insn)
4977 > INSN_LUID (scan_start))))
4978 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4979 && (INSN_LUID (scan_start)
4980 < INSN_LUID (bl->biv->insn))))
4981 auto_inc_opt = -1;
4982 else
4983 auto_inc_opt = 1;
4985 #ifdef HAVE_cc0
4987 rtx prev;
4989 /* We can't put an insn immediately after one setting
4990 cc0, or immediately before one using cc0. */
4991 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4992 || (auto_inc_opt == -1
4993 && (prev = prev_nonnote_insn (v->insn)) != 0
4994 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4995 && sets_cc0_p (PATTERN (prev))))
4996 auto_inc_opt = 0;
4998 #endif
5000 if (auto_inc_opt)
5001 v->auto_inc_opt = 1;
5003 #endif
5005 /* For each place where the biv is incremented, add an insn
5006 to increment the new, reduced reg for the giv. */
5007 for (tv = bl->biv; tv; tv = tv->next_iv)
5009 rtx insert_before;
5011 if (! auto_inc_opt)
5012 insert_before = tv->insn;
5013 else if (auto_inc_opt == 1)
5014 insert_before = NEXT_INSN (v->insn);
5015 else
5016 insert_before = v->insn;
5018 if (tv->mult_val == const1_rtx)
5019 emit_iv_add_mult (tv->add_val, v->mult_val,
5020 v->new_reg, v->new_reg, insert_before);
5021 else /* tv->mult_val == const0_rtx */
5022 /* A multiply is acceptable here
5023 since this is presumed to be seldom executed. */
5024 emit_iv_add_mult (tv->add_val, v->mult_val,
5025 v->add_val, v->new_reg, insert_before);
5028 /* Add code at loop start to initialize giv's reduced reg. */
5030 emit_iv_add_mult (bl->initial_value, v->mult_val,
5031 v->add_val, v->new_reg, loop_start);
5035 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5036 as not reduced.
5038 For each giv register that can be reduced now: if replaceable,
5039 substitute reduced reg wherever the old giv occurs;
5040 else add new move insn "giv_reg = reduced_reg". */
5042 for (v = bl->giv; v; v = v->next_iv)
5044 if (v->same && v->same->ignore)
5045 v->ignore = 1;
5047 if (v->ignore)
5048 continue;
5050 /* Update expression if this was combined, in case other giv was
5051 replaced. */
5052 if (v->same)
5053 v->new_reg = replace_rtx (v->new_reg,
5054 v->same->dest_reg, v->same->new_reg);
5056 if (v->giv_type == DEST_ADDR)
5057 /* Store reduced reg as the address in the memref where we found
5058 this giv. */
5059 validate_change (v->insn, v->location, v->new_reg, 0);
5060 else if (v->replaceable)
5062 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5064 #if 0
5065 /* I can no longer duplicate the original problem. Perhaps
5066 this is unnecessary now? */
5068 /* Replaceable; it isn't strictly necessary to delete the old
5069 insn and emit a new one, because v->dest_reg is now dead.
5071 However, especially when unrolling loops, the special
5072 handling for (set REG0 REG1) in the second cse pass may
5073 make v->dest_reg live again. To avoid this problem, emit
5074 an insn to set the original giv reg from the reduced giv.
5075 We cannot delete the original insn, since it may be part
5076 of a LIBCALL, and the code in flow that eliminates dead
5077 libcalls will fail if it is deleted. */
5078 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
5079 v->insn);
5080 #endif
5082 else
5084 /* Not replaceable; emit an insn to set the original giv reg from
5085 the reduced giv, same as above. */
5086 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
5087 v->insn);
5090 /* When a loop is reversed, givs which depend on the reversed
5091 biv, and which are live outside the loop, must be set to their
5092 correct final value. This insn is only needed if the giv is
5093 not replaceable. The correct final value is the same as the
5094 value that the giv starts the reversed loop with. */
5095 if (bl->reversed && ! v->replaceable)
5096 emit_iv_add_mult (bl->initial_value, v->mult_val,
5097 v->add_val, v->dest_reg, end_insert_before);
5098 else if (v->final_value)
5100 rtx insert_before;
5102 /* If the loop has multiple exits, emit the insn before the
5103 loop to ensure that it will always be executed no matter
5104 how the loop exits. Otherwise, emit the insn after the loop,
5105 since this is slightly more efficient. */
5106 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
5107 insert_before = loop_start;
5108 else
5109 insert_before = end_insert_before;
5110 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
5111 insert_before);
5113 #if 0
5114 /* If the insn to set the final value of the giv was emitted
5115 before the loop, then we must delete the insn inside the loop
5116 that sets it. If this is a LIBCALL, then we must delete
5117 every insn in the libcall. Note, however, that
5118 final_giv_value will only succeed when there are multiple
5119 exits if the giv is dead at each exit, hence it does not
5120 matter that the original insn remains because it is dead
5121 anyway. */
5122 /* Delete the insn inside the loop that sets the giv since
5123 the giv is now set before (or after) the loop. */
5124 delete_insn (v->insn);
5125 #endif
5128 if (loop_dump_stream)
5130 fprintf (loop_dump_stream, "giv at %d reduced to ",
5131 INSN_UID (v->insn));
5132 print_rtl (loop_dump_stream, v->new_reg);
5133 fprintf (loop_dump_stream, "\n");
5137 /* All the givs based on the biv bl have been reduced if they
5138 merit it. */
5140 /* For each giv not marked as maybe dead that has been combined with a
5141 second giv, clear any "maybe dead" mark on that second giv.
5142 v->new_reg will either be or refer to the register of the giv it
5143 combined with.
5145 Doing this clearing avoids problems in biv elimination where a
5146 giv's new_reg is a complex value that can't be put in the insn but
5147 the giv combined with (with a reg as new_reg) is marked maybe_dead.
5148 Since the register will be used in either case, we'd prefer it be
5149 used from the simpler giv. */
5151 for (v = bl->giv; v; v = v->next_iv)
5152 if (! v->maybe_dead && v->same)
5153 v->same->maybe_dead = 0;
5155 /* Try to eliminate the biv, if it is a candidate.
5156 This won't work if ! all_reduced,
5157 since the givs we planned to use might not have been reduced.
5159 We have to be careful that we didn't initially think we could eliminate
5160 this biv because of a giv that we now think may be dead and shouldn't
5161 be used as a biv replacement.
5163 Also, there is the possibility that we may have a giv that looks
5164 like it can be used to eliminate a biv, but the resulting insn
5165 isn't valid. This can happen, for example, on the 88k, where a
5166 JUMP_INSN can compare a register only with zero. Attempts to
5167 replace it with a compare with a constant will fail.
5169 Note that in cases where this call fails, we may have replaced some
5170 of the occurrences of the biv with a giv, but no harm was done in
5171 doing so in the rare cases where it can occur. */
5173 if (all_reduced == 1 && bl->eliminable
5174 && maybe_eliminate_biv (bl, loop_start, end, 1,
5175 threshold, insn_count))
5178 /* ??? If we created a new test to bypass the loop entirely,
5179 or otherwise drop straight in, based on this test, then
5180 we might want to rewrite it also. This way some later
5181 pass has more hope of removing the initialization of this
5182 biv entirely. */
5184 /* If final_value != 0, then the biv may be used after loop end
5185 and we must emit an insn to set it just in case.
5187 Reversed bivs already have an insn after the loop setting their
5188 value, so we don't need another one. We can't calculate the
5189 proper final value for such a biv here anyways. */
5190 if (final_value != 0 && ! bl->reversed)
5192 rtx insert_before;
5194 /* If the loop has multiple exits, emit the insn before the
5195 loop to ensure that it will always be executed no matter
5196 how the loop exits. Otherwise, emit the insn after the
5197 loop, since this is slightly more efficient. */
5198 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
5199 insert_before = loop_start;
5200 else
5201 insert_before = end_insert_before;
5203 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
5204 insert_before);
5207 #if 0
5208 /* Delete all of the instructions inside the loop which set
5209 the biv, as they are all dead. It is safe to delete them,
5210 because an insn setting a biv will never be part of a libcall. */
5211 /* However, deleting them will invalidate the regno_last_uid info,
5212 so keeping them around is more convenient. Final_biv_value
5213 will only succeed when there are multiple exits if the biv
5214 is dead at each exit, hence it does not matter that the original
5215 insn remains, because it is dead anyway. */
5216 for (v = bl->biv; v; v = v->next_iv)
5217 delete_insn (v->insn);
5218 #endif
5220 if (loop_dump_stream)
5221 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5222 bl->regno);
5226 /* Go through all the instructions in the loop, making all the
5227 register substitutions scheduled in REG_MAP. */
5229 for (p = loop_start; p != end; p = NEXT_INSN (p))
5230 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5231 || GET_CODE (p) == CALL_INSN)
5233 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5234 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5235 INSN_CODE (p) = -1;
5238 if (loop_info->n_iterations > 0)
5240 /* When we completely unroll a loop we will likely not need the increment
5241 of the loop BIV and we will not need the conditional branch at the
5242 end of the loop. */
5243 unrolled_insn_copies = insn_count - 2;
5245 #ifdef HAVE_cc0
5246 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5247 need the comparison before the conditional branch at the end of the
5248 loop. */
5249 unrolled_insn_copies -= 1;
5250 #endif
5252 /* We'll need one copy for each loop iteration. */
5253 unrolled_insn_copies *= loop_info->n_iterations;
5255 /* A little slop to account for the ability to remove initialization
5256 code, better CSE, and other secondary benefits of completely
5257 unrolling some loops. */
5258 unrolled_insn_copies -= 1;
5260 /* Clamp the value. */
5261 if (unrolled_insn_copies < 0)
5262 unrolled_insn_copies = 0;
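/* Illustrative arithmetic for the estimate above (numbers hypothetical):
a 10-insn loop body known to iterate 4 times on a HAVE_cc0 machine
gives (10 - 2 - 1) * 4 - 1 = 27 unrolled copies; since 27 > 10, such
a loop would not be unrolled on size grounds alone. */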
5265 /* Unroll loops from within strength reduction so that we can use the
5266 induction variable information that strength_reduce has already
5267 collected. Always unroll loops that would be as small or smaller
5268 unrolled than when rolled. */
5269 if (unroll_p
5270 || (loop_info->n_iterations > 0
5271 && unrolled_insn_copies <= insn_count))
5272 unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
5273 loop_info, 1);
5275 #ifdef HAVE_decrement_and_branch_on_count
5276 /* Instrument the loop with BCT insn. */
5277 if (HAVE_decrement_and_branch_on_count && bct_p
5278 && flag_branch_on_count_reg)
5279 insert_bct (loop_start, loop_end, loop_info);
5280 #endif /* HAVE_decrement_and_branch_on_count */
5282 if (loop_dump_stream)
5283 fprintf (loop_dump_stream, "\n");
5285 egress:
5286 VARRAY_FREE (reg_iv_type);
5287 VARRAY_FREE (reg_iv_info);
5290 /* Return 1 if X is a valid source for an initial value (or as value being
5291 compared against in an initial test).
5293 X must be either a register or constant and must not be clobbered between
5294 the current insn and the start of the loop.
5296 INSN is the insn containing X. */
5298 static int
5299 valid_initial_value_p (x, insn, call_seen, loop_start)
5300 rtx x;
5301 rtx insn;
5302 int call_seen;
5303 rtx loop_start;
5305 if (CONSTANT_P (x))
5306 return 1;
5308 /* Only consider pseudos we know about, initialized in insns whose luids
5309 we know. */
5310 if (GET_CODE (x) != REG
5311 || REGNO (x) >= max_reg_before_loop)
5312 return 0;
5314 /* Don't use a call-clobbered register across a call which clobbers it. On
5315 some machines, don't use any hard registers at all. */
5316 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5317 && (SMALL_REGISTER_CLASSES
5318 || (call_used_regs[REGNO (x)] && call_seen)))
5319 return 0;
5321 /* Don't use registers that have been clobbered before the start of the
5322 loop. */
5323 if (reg_set_between_p (x, insn, loop_start))
5324 return 0;
5326 return 1;
5329 /* Scan X for memory refs and check each memory address
5330 as a possible giv. INSN is the insn whose pattern X comes from.
5331 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5332 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5333 more than once in each loop iteration. */
5335 static void
5336 find_mem_givs (x, insn, not_every_iteration, maybe_multiple, loop_start,
5337 loop_end)
5338 rtx x;
5339 rtx insn;
5340 int not_every_iteration, maybe_multiple;
5341 rtx loop_start, loop_end;
5343 register int i, j;
5344 register enum rtx_code code;
5345 register const char *fmt;
5347 if (x == 0)
5348 return;
5350 code = GET_CODE (x);
5351 switch (code)
5353 case REG:
5354 case CONST_INT:
5355 case CONST:
5356 case CONST_DOUBLE:
5357 case SYMBOL_REF:
5358 case LABEL_REF:
5359 case PC:
5360 case CC0:
5361 case ADDR_VEC:
5362 case ADDR_DIFF_VEC:
5363 case USE:
5364 case CLOBBER:
5365 return;
5367 case MEM:
5369 rtx src_reg;
5370 rtx add_val;
5371 rtx mult_val;
5372 int benefit;
5374 /* This code used to disable creating GIVs with mult_val == 1 and
5375 add_val == 0. However, this leads to lost optimizations when
5376 it comes time to combine a set of related DEST_ADDR GIVs, since
5377 this one would not be seen. */
5379 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
5380 &mult_val, 1, &benefit))
5382 /* Found one; record it. */
5383 struct induction *v
5384 = (struct induction *) oballoc (sizeof (struct induction));
5386 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
5387 add_val, benefit, DEST_ADDR, not_every_iteration,
5388 maybe_multiple, &XEXP (x, 0), loop_start, loop_end);
5390 v->mem_mode = GET_MODE (x);
5393 return;
5395 default:
5396 break;
5399 /* Recursively scan the subexpressions for other mem refs. */
5401 fmt = GET_RTX_FORMAT (code);
5402 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5403 if (fmt[i] == 'e')
5404 find_mem_givs (XEXP (x, i), insn, not_every_iteration, maybe_multiple,
5405 loop_start, loop_end);
5406 else if (fmt[i] == 'E')
5407 for (j = 0; j < XVECLEN (x, i); j++)
5408 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
5409 maybe_multiple, loop_start, loop_end);
5412 /* Fill in the data about one biv update.
5413 V is the `struct induction' in which we record the biv. (It is
5414 allocated by the caller, with alloca.)
5415 INSN is the insn that sets it.
5416 DEST_REG is the biv's reg.
5418 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5419 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5420 being set to INC_VAL.
5422 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5423 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5424 can be executed more than once per iteration. If MAYBE_MULTIPLE
5425 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5426 executed exactly once per iteration. */
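/* For example (purely illustrative), the insn `i = i + 4' is recorded
with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), while
`i = n', with `n' loop-invariant, is recorded with MULT_VAL ==
const0_rtx and INC_VAL == (reg n). */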
5428 static void
5429 record_biv (v, insn, dest_reg, inc_val, mult_val, location,
5430 not_every_iteration, maybe_multiple)
5431 struct induction *v;
5432 rtx insn;
5433 rtx dest_reg;
5434 rtx inc_val;
5435 rtx mult_val;
5436 rtx *location;
5437 int not_every_iteration;
5438 int maybe_multiple;
5440 struct iv_class *bl;
5442 v->insn = insn;
5443 v->src_reg = dest_reg;
5444 v->dest_reg = dest_reg;
5445 v->mult_val = mult_val;
5446 v->add_val = inc_val;
5447 v->location = location;
5448 v->mode = GET_MODE (dest_reg);
5449 v->always_computable = ! not_every_iteration;
5450 v->always_executed = ! not_every_iteration;
5451 v->maybe_multiple = maybe_multiple;
5453 /* Add this to the reg's iv_class, creating a class
5454 if this is the first incrementation of the reg. */
5456 bl = reg_biv_class[REGNO (dest_reg)];
5457 if (bl == 0)
5459 /* Create and initialize new iv_class. */
5461 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
5463 bl->regno = REGNO (dest_reg);
5464 bl->biv = 0;
5465 bl->giv = 0;
5466 bl->biv_count = 0;
5467 bl->giv_count = 0;
5469 /* Set initial value to the reg itself. */
5470 bl->initial_value = dest_reg;
5471 /* We haven't seen the initializing insn yet. */
5472 bl->init_insn = 0;
5473 bl->init_set = 0;
5474 bl->initial_test = 0;
5475 bl->incremented = 0;
5476 bl->eliminable = 0;
5477 bl->nonneg = 0;
5478 bl->reversed = 0;
5479 bl->total_benefit = 0;
5481 /* Add this class to loop_iv_list. */
5482 bl->next = loop_iv_list;
5483 loop_iv_list = bl;
5485 /* Put it in the array of biv register classes. */
5486 reg_biv_class[REGNO (dest_reg)] = bl;
5489 /* Update IV_CLASS entry for this biv. */
5490 v->next_iv = bl->biv;
5491 bl->biv = v;
5492 bl->biv_count++;
5493 if (mult_val == const1_rtx)
5494 bl->incremented = 1;
5496 if (loop_dump_stream)
5498 fprintf (loop_dump_stream,
5499 "Insn %d: possible biv, reg %d,",
5500 INSN_UID (insn), REGNO (dest_reg));
5501 if (GET_CODE (inc_val) == CONST_INT)
5503 fprintf (loop_dump_stream, " const =");
5504 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
5505 fputc ('\n', loop_dump_stream);
5507 else
5509 fprintf (loop_dump_stream, " const = ");
5510 print_rtl (loop_dump_stream, inc_val);
5511 fprintf (loop_dump_stream, "\n");
5516 /* Fill in the data about one giv.
5517 V is the `struct induction' in which we record the giv. (It is
5518 allocated by the caller, with alloca.)
5519 INSN is the insn that sets it.
5520 BENEFIT estimates the savings from deleting this insn.
5521 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5522 into a register or is used as a memory address.
5524 SRC_REG is the biv reg which the giv is computed from.
5525 DEST_REG is the giv's reg (if the giv is stored in a reg).
5526 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5527 LOCATION points to the place where this giv's value appears in INSN. */
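/* For instance (names illustrative), an insn computing `p = i*4 + base'
from biv `i' is recorded with SRC_REG == (reg i), DEST_REG == (reg p),
MULT_VAL == (const_int 4) and ADD_VAL == (reg base). */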
5529 static void
5530 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
5531 type, not_every_iteration, maybe_multiple, location, loop_start,
5532 loop_end)
5533 struct induction *v;
5534 rtx insn;
5535 rtx src_reg;
5536 rtx dest_reg;
5537 rtx mult_val, add_val;
5538 int benefit;
5539 enum g_types type;
5540 int not_every_iteration, maybe_multiple;
5541 rtx *location;
5542 rtx loop_start, loop_end;
5544 struct induction *b;
5545 struct iv_class *bl;
5546 rtx set = single_set (insn);
5548 v->insn = insn;
5549 v->src_reg = src_reg;
5550 v->giv_type = type;
5551 v->dest_reg = dest_reg;
5552 v->mult_val = mult_val;
5553 v->add_val = add_val;
5554 v->benefit = benefit;
5555 v->location = location;
5556 v->cant_derive = 0;
5557 v->combined_with = 0;
5558 v->maybe_multiple = maybe_multiple;
5559 v->maybe_dead = 0;
5560 v->derive_adjustment = 0;
5561 v->same = 0;
5562 v->ignore = 0;
5563 v->new_reg = 0;
5564 v->final_value = 0;
5565 v->same_insn = 0;
5566 v->auto_inc_opt = 0;
5567 v->unrolled = 0;
5568 v->shared = 0;
5569 v->derived_from = 0;
5570 v->last_use = 0;
5572 /* The v->always_computable field is used in update_giv_derive, to
5573 determine whether a giv can be used to derive another giv. For a
5574 DEST_REG giv, INSN computes a new value for the giv, so its value
5575 isn't computable if INSN isn't executed every iteration.
5576 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5577 it does not compute a new value. Hence the value is always computable
5578 regardless of whether INSN is executed each iteration. */
5580 if (type == DEST_ADDR)
5581 v->always_computable = 1;
5582 else
5583 v->always_computable = ! not_every_iteration;
5585 v->always_executed = ! not_every_iteration;
5587 if (type == DEST_ADDR)
5589 v->mode = GET_MODE (*location);
5590 v->lifetime = 1;
5592 else /* type == DEST_REG */
5594 v->mode = GET_MODE (SET_DEST (set));
5596 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
5597 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
5599 /* If the lifetime is zero, it means that this register is
5600 really a dead store. So mark this as a giv that can be
5601 ignored. This will not prevent the biv from being eliminated. */
5602 if (v->lifetime == 0)
5603 v->ignore = 1;
5605 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
5606 REG_IV_INFO (REGNO (dest_reg)) = v;
5609 /* Add the giv to the class of givs computed from one biv. */
5611 bl = reg_biv_class[REGNO (src_reg)];
5612 if (bl)
5614 v->next_iv = bl->giv;
5615 bl->giv = v;
5616 /* Don't count DEST_ADDR. This is supposed to count the number of
5617 insns that calculate givs. */
5618 if (type == DEST_REG)
5619 bl->giv_count++;
5620 bl->total_benefit += benefit;
5622 else
5623 /* Fatal error, biv missing for this giv? */
5624 abort ();
5626 if (type == DEST_ADDR)
5627 v->replaceable = 1;
5628 else
5630 /* The giv can be replaced outright by the reduced register only if all
5631 of the following conditions are true:
5632 - the insn that sets the giv is always executed on any iteration
5633 on which the giv is used at all
5634 (there are two ways to deduce this:
5635 either the insn is executed on every iteration,
5636 or all uses follow that insn in the same basic block),
5637 - the giv is not used outside the loop
5638 - no assignments to the biv occur during the giv's lifetime. */
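/* E.g. (hypothetical) if the giv is set from the biv, the biv is then
incremented, and the giv is used again later in the same iteration,
the reduced register would change value between the two uses while
the original giv would not, so the giv is not replaceable. */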
5640 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5641 /* Previous line always fails if INSN was moved by loop opt. */
5642 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
5643 && (! not_every_iteration
5644 || last_use_this_basic_block (dest_reg, insn)))
5646 /* Now check that there are no assignments to the biv within the
5647 giv's lifetime. This requires two separate checks. */
5649 /* Check each biv update, and fail if any are between the first
5650 and last use of the giv.
5652 If this loop contains an inner loop that was unrolled, then
5653 the insn modifying the biv may have been emitted by the loop
5654 unrolling code, and hence does not have a valid luid. Just
5655 mark the giv as not replaceable in this case. The biv is not very
5656 useful anyway, because it is used in two different loops.
5657 It is very unlikely that we would be able to optimize the giv
5658 using this biv anyway. */
5660 v->replaceable = 1;
5661 for (b = bl->biv; b; b = b->next_iv)
5663 if (INSN_UID (b->insn) >= max_uid_for_loop
5664 || ((uid_luid[INSN_UID (b->insn)]
5665 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
5666 && (uid_luid[INSN_UID (b->insn)]
5667 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
5669 v->replaceable = 0;
5670 v->not_replaceable = 1;
5671 break;
5675 /* If there are any backwards branches that go from after the
5676 biv update to before it, then this giv is not replaceable. */
5677 if (v->replaceable)
5678 for (b = bl->biv; b; b = b->next_iv)
5679 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
5681 v->replaceable = 0;
5682 v->not_replaceable = 1;
5683 break;
5686 else
5688 /* May still be replaceable, we don't have enough info here to
5689 decide. */
5690 v->replaceable = 0;
5691 v->not_replaceable = 0;
5695 /* Record whether the add_val contains a const_int, for later use by
5696 combine_givs. */
5698 rtx tem = add_val;
5700 v->no_const_addval = 1;
5701 if (tem == const0_rtx)
5703 else if (GET_CODE (tem) == CONST_INT)
5704 v->no_const_addval = 0;
5705 else if (GET_CODE (tem) == PLUS)
5707 while (1)
5709 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5710 tem = XEXP (tem, 0);
5711 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5712 tem = XEXP (tem, 1);
5713 else
5714 break;
5716 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
5717 v->no_const_addval = 0;
5721 if (loop_dump_stream)
5723 if (type == DEST_REG)
5724 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5725 INSN_UID (insn), REGNO (dest_reg));
5726 else
5727 fprintf (loop_dump_stream, "Insn %d: dest address",
5728 INSN_UID (insn));
5730 fprintf (loop_dump_stream, " src reg %d benefit %d",
5731 REGNO (src_reg), v->benefit);
5732 fprintf (loop_dump_stream, " lifetime %d",
5733 v->lifetime);
5735 if (v->replaceable)
5736 fprintf (loop_dump_stream, " replaceable");
5738 if (v->no_const_addval)
5739 fprintf (loop_dump_stream, " ncav");
5741 if (GET_CODE (mult_val) == CONST_INT)
5743 fprintf (loop_dump_stream, " mult ");
5744 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5746 else
5748 fprintf (loop_dump_stream, " mult ");
5749 print_rtl (loop_dump_stream, mult_val);
5752 if (GET_CODE (add_val) == CONST_INT)
5754 fprintf (loop_dump_stream, " add ");
5755 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5757 else
5759 fprintf (loop_dump_stream, " add ");
5760 print_rtl (loop_dump_stream, add_val);
5764 if (loop_dump_stream)
5765 fprintf (loop_dump_stream, "\n");
5770 /* All this does is determine whether a giv can be made replaceable because
5771 its final value can be calculated. This code can not be part of record_giv
5772 above, because final_giv_value requires that the number of loop iterations
5773 be known, and that can not be accurately calculated until after all givs
5774 have been identified. */
5776 static void
5777 check_final_value (v, loop_start, loop_end, n_iterations)
5778 struct induction *v;
5779 rtx loop_start, loop_end;
5780 unsigned HOST_WIDE_INT n_iterations;
5782 struct iv_class *bl;
5783 rtx final_value = 0;
5785 bl = reg_biv_class[REGNO (v->src_reg)];
5787 /* DEST_ADDR givs will never reach here, because they are always marked
5788 replaceable above in record_giv. */
5790 /* The giv can be replaced outright by the reduced register only if all
5791 of the following conditions are true:
5792 - the insn that sets the giv is always executed on any iteration
5793 on which the giv is used at all
5794 (there are two ways to deduce this:
5795 either the insn is executed on every iteration,
5796 or all uses follow that insn in the same basic block),
5797 - its final value can be calculated (this condition is different
5798 than the one above in record_giv)
5799 - no assignments to the biv occur during the giv's lifetime. */
5801 #if 0
5802 /* This is only called now when replaceable is known to be false. */
5803 /* Clear replaceable, so that it won't confuse final_giv_value. */
5804 v->replaceable = 0;
5805 #endif
5807 if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations))
5808 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5810 int biv_increment_seen = 0;
5811 rtx p = v->insn;
5812 rtx last_giv_use;
5814 v->replaceable = 1;
5816 /* When trying to determine whether or not a biv increment occurs
5817 during the lifetime of the giv, we can ignore uses of the variable
5818 outside the loop because final_value is true. Hence we can not
5819 use regno_last_uid and regno_first_uid as above in record_giv. */
5821 /* Search the loop to determine whether any assignments to the
5822 biv occur during the giv's lifetime. Start with the insn
5823 that sets the giv, and search around the loop until we come
5824 back to that insn again.
5826 Also fail if there is a jump within the giv's lifetime that jumps
5827 to somewhere outside the lifetime but still within the loop. This
5828 catches spaghetti code where the execution order is not linear, and
5829 hence the above test fails. Here we assume that the giv lifetime
5830 does not extend from one iteration of the loop to the next, so as
5831 to make the test easier. Since the lifetime isn't known yet,
5832 this requires two loops. See also record_giv above. */
5834 last_giv_use = v->insn;
5836 while (1)
5838 p = NEXT_INSN (p);
5839 if (p == loop_end)
5840 p = NEXT_INSN (loop_start);
5841 if (p == v->insn)
5842 break;
5844 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5845 || GET_CODE (p) == CALL_INSN)
5847 if (biv_increment_seen)
5849 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5851 v->replaceable = 0;
5852 v->not_replaceable = 1;
5853 break;
5856 else if (reg_set_p (v->src_reg, PATTERN (p)))
5857 biv_increment_seen = 1;
5858 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5859 last_giv_use = p;
5863 /* Now that the lifetime of the giv is known, check for branches
5864 from within the lifetime to outside the lifetime if it is still
5865 replaceable. */
5867 if (v->replaceable)
5869 p = v->insn;
5870 while (1)
5872 p = NEXT_INSN (p);
5873 if (p == loop_end)
5874 p = NEXT_INSN (loop_start);
5875 if (p == last_giv_use)
5876 break;
5878 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5879 && LABEL_NAME (JUMP_LABEL (p))
5880 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5881 && loop_insn_first_p (loop_start, JUMP_LABEL (p)))
5882 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5883 && loop_insn_first_p (JUMP_LABEL (p), loop_end))))
5885 v->replaceable = 0;
5886 v->not_replaceable = 1;
5888 if (loop_dump_stream)
5889 fprintf (loop_dump_stream,
5890 "Found branch outside giv lifetime.\n");
5892 break;
5897 /* If it is replaceable, then save the final value. */
5898 if (v->replaceable)
5899 v->final_value = final_value;
5902 if (loop_dump_stream && v->replaceable)
5903 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5904 INSN_UID (v->insn), REGNO (v->dest_reg));
5907 /* Update the status of whether a giv can derive other givs.
5909 We need to do something special if there is or may be an update to the biv
5910 between the time the giv is defined and the time it is used to derive
5911 another giv.
5913 In addition, a giv that is only conditionally set is not allowed to
5914 derive another giv once a label has been passed.
5916 The cases we look at are when a label or an update to a biv is passed. */
5918 static void
5919 update_giv_derive (p)
5920 rtx p;
5922 struct iv_class *bl;
5923 struct induction *biv, *giv;
5924 rtx tem;
5925 int dummy;
5927 /* Search all IV classes, then all bivs, and finally all givs.
5929 There are three cases we are concerned with. First we have the situation
5930 of a giv that is only updated conditionally. In that case, it may not
5931 derive any givs after a label is passed.
5933 The second case is when a biv update occurs, or may occur, after the
5934 definition of a giv. For certain biv updates (see below) that are
5935 known to occur between the giv definition and use, we can adjust the
5936 giv definition. For others, or when the biv update is conditional,
5937 we must prevent the giv from deriving any other givs. There are two
5938 sub-cases within this case.
5940 If this is a label, we are concerned with any biv update that is done
5941 conditionally, since it may be done after the giv is defined followed by
5942 a branch here (actually, we need to pass both a jump and a label, but
5943 this extra tracking doesn't seem worth it).
5945 If this is a jump, we are concerned about any biv update that may be
5946 executed multiple times. We are actually only concerned about
5947 backward jumps, but it is probably not worth performing the test
5948 on the jump again here.
5950 If this is a biv update, we must adjust the giv status to show that a
5951 subsequent biv update was performed. If this adjustment cannot be done,
5952 the giv cannot derive further givs. */
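/* Illustrative case: if giv g == b*3 is defined before the biv update
`b = b + 2' within one iteration, a giv later derived from g must be
compensated by the product 3*2 == 6, which is what DERIVE_ADJUSTMENT
accumulates below. */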
5954 for (bl = loop_iv_list; bl; bl = bl->next)
5955 for (biv = bl->biv; biv; biv = biv->next_iv)
5956 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5957 || biv->insn == p)
5959 for (giv = bl->giv; giv; giv = giv->next_iv)
5961 /* If cant_derive is already true, there is no point in
5962 checking all of these conditions again. */
5963 if (giv->cant_derive)
5964 continue;
5966 /* If this giv is conditionally set and we have passed a label,
5967 it cannot derive anything. */
5968 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5969 giv->cant_derive = 1;
5971 /* Skip givs that have mult_val == 0, since
5972 they are really invariants. Also skip those that are
5973 replaceable, since we know their lifetime doesn't contain
5974 any biv update. */
5975 else if (giv->mult_val == const0_rtx || giv->replaceable)
5976 continue;
5978 /* The only way we can allow this giv to derive another
5979 is if this is a biv increment and we can form the product
5980 of biv->add_val and giv->mult_val. In this case, we will
5981 be able to compute a compensation. */
5982 else if (biv->insn == p)
5984 tem = 0;
5986 if (biv->mult_val == const1_rtx)
5987 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5988 biv->add_val,
5989 giv->mult_val),
5990 &dummy);
5992 if (tem && giv->derive_adjustment)
5993 tem = simplify_giv_expr
5994 (gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5995 &dummy);
5997 if (tem)
5998 giv->derive_adjustment = tem;
5999 else
6000 giv->cant_derive = 1;
6002 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6003 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6004 giv->cant_derive = 1;
6009 /* Check whether an insn is an increment legitimate for a basic induction var.
6010 X is the source of insn P, or a part of it.
6011 MODE is the mode in which X should be interpreted.
6013 DEST_REG is the putative biv, also the destination of the insn.
6014 We accept patterns of these forms:
6015 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6016 REG = INVARIANT + REG
6018 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6019 store the additive term into *INC_VAL, and store the place where
6020 we found the additive term into *LOCATION.
6022 If X is an assignment of an invariant into DEST_REG, we set
6023 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6025 We also want to detect a BIV when it corresponds to a variable
6026 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6027 of the variable may be a PLUS that adds a SUBREG of that variable to
6028 an invariant and then sign- or zero-extends the result of the PLUS
6029 into the variable.
6031 Most GIVs in such cases will be in the promoted mode, since that is
6032 probably the natural computation mode (and almost certainly the mode
6033 used for addresses) on the machine. So we view the pseudo-reg containing
6034 the variable as the BIV, as if it were simply incremented.
6036 Note that treating the entire pseudo as a BIV will result in making
6037 simple increments to any GIVs based on it. However, if the variable
6038 overflows in its declared mode but not its promoted mode, the result will
6039 be incorrect. This is acceptable if the variable is signed, since
6040 overflows in such cases are undefined, but not if it is unsigned, since
6041 those overflows are defined. So we only check for SIGN_EXTEND and
6042 not ZERO_EXTEND.
6044 If we cannot find a biv, we return 0. */
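/* A sketch of the promoted-mode case (modes and register illustrative):
an HImode variable kept in an SImode pseudo may be incremented as

(set (reg:SI i)
(sign_extend:SI (plus:HI (subreg:HI (reg:SI i) 0)
(const_int 1))))

and (reg:SI i) is still recognized as the biv. */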
6046 static int
6047 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val, location)
6048 register rtx x;
6049 enum machine_mode mode;
6050 rtx p;
6051 rtx dest_reg;
6052 rtx *inc_val;
6053 rtx *mult_val;
6054 rtx **location;
6056 register enum rtx_code code;
6057 rtx *argp, arg;
6058 rtx insn, set = 0;
6060 code = GET_CODE (x);
6061 *location = NULL;
6062 switch (code)
6064 case PLUS:
6065 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6066 || (GET_CODE (XEXP (x, 0)) == SUBREG
6067 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6068 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6070 argp = &XEXP (x, 1);
6072 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6073 || (GET_CODE (XEXP (x, 1)) == SUBREG
6074 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6075 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6077 argp = &XEXP (x, 0);
6079 else
6080 return 0;
6082 arg = *argp;
6083 if (invariant_p (arg) != 1)
6084 return 0;
6086 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6087 *mult_val = const1_rtx;
6088 *location = argp;
6089 return 1;
6091 case SUBREG:
6092 /* If this is a SUBREG for a promoted variable, check the inner
6093 value. */
6094 if (SUBREG_PROMOTED_VAR_P (x))
6095 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
6096 dest_reg, p, inc_val, mult_val, location);
6097 return 0;
6099 case REG:
6100 /* If this register is assigned in a previous insn, look at its
6101 source, but don't go outside the loop or past a label. */
6103 insn = p;
6104 while (1)
6106 do {
6107 insn = PREV_INSN (insn);
6108 } while (insn && GET_CODE (insn) == NOTE
6109 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6111 if (!insn)
6112 break;
6113 set = single_set (insn);
6114 if (set == 0)
6115 break;
6117 if ((SET_DEST (set) == x
6118 || (GET_CODE (SET_DEST (set)) == SUBREG
6119 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
6120 <= UNITS_PER_WORD)
6121 && (GET_MODE_CLASS (GET_MODE (SET_DEST (set)))
6122 == MODE_INT)
6123 && SUBREG_REG (SET_DEST (set)) == x))
6124 && basic_induction_var (SET_SRC (set),
6125 (GET_MODE (SET_SRC (set)) == VOIDmode
6126 ? GET_MODE (x)
6127 : GET_MODE (SET_SRC (set))),
6128 dest_reg, insn,
6129 inc_val, mult_val, location))
6130 return 1;
6132 /* ... fall through ... */
6134 /* Can accept constant setting of biv only when inside inner most loop.
6135 Otherwise, a biv of an inner loop may be incorrectly recognized
6136 as a biv of the outer loop,
6137 causing code to be moved INTO the inner loop. */
6138 case MEM:
6139 if (invariant_p (x) != 1)
6140 return 0;
6141 case CONST_INT:
6142 case SYMBOL_REF:
6143 case CONST:
6144 /* convert_modes aborts if we try to convert to or from CCmode, so just
6145 exclude that case. It is very unlikely that a condition code value
6146 would be a useful iterator anyway. */
6147 if (this_loop_info.loops_enclosed == 1
6148 && GET_MODE_CLASS (mode) != MODE_CC
6149 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6151 /* Possible bug here? Perhaps we don't know the mode of X. */
6152 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6153 *mult_val = const0_rtx;
6154 return 1;
6156 else
6157 return 0;
6159 case SIGN_EXTEND:
6160 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6161 dest_reg, p, inc_val, mult_val, location);
6163 case ASHIFTRT:
6164 /* Similar, since this can be a sign extension. */
6165 for (insn = PREV_INSN (p);
6166 (insn && GET_CODE (insn) == NOTE
6167 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6168 insn = PREV_INSN (insn))
6169 ;
6171 if (insn)
6172 set = single_set (insn);
6174 if (set && SET_DEST (set) == XEXP (x, 0)
6175 && GET_CODE (XEXP (x, 1)) == CONST_INT
6176 && INTVAL (XEXP (x, 1)) >= 0
6177 && GET_CODE (SET_SRC (set)) == ASHIFT
6178 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6179 return basic_induction_var (XEXP (SET_SRC (set), 0),
6180 GET_MODE (XEXP (x, 0)),
6181 dest_reg, insn, inc_val, mult_val,
6182 location);
6183 return 0;
6185 default:
6186 return 0;
6190 /* A general induction variable (giv) is any quantity that is a linear
6191 function of a basic induction variable,
6192 i.e. giv = biv * mult_val + add_val.
6193 The coefficients can be any loop invariant quantity.
6194 A giv need not be computed directly from the biv;
6195 it can be computed by way of other givs. */
6197 /* Determine whether X computes a giv.
6198 If it does, return a nonzero value
6199 which is the benefit from eliminating the computation of X;
6200 set *SRC_REG to the register of the biv that it is computed from;
6201 set *ADD_VAL and *MULT_VAL to the coefficients,
6202 such that the value of X is biv * mult + add; */
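/* E.g. (registers illustrative) for
X == (plus (mult (reg i) (const_int 4)) (reg base)),
where (reg i) is a biv and (reg base) is loop-invariant, we set
*SRC_REG = (reg i), *MULT_VAL = (const_int 4), *ADD_VAL = (reg base). */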
6204 static int
6205 general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
6206 rtx x;
6207 rtx *src_reg;
6208 rtx *add_val;
6209 rtx *mult_val;
6210 int is_addr;
6211 int *pbenefit;
6213 rtx orig_x = x;
6214 char *storage;
6216 /* If this is an invariant, forget it, it isn't a giv. */
6217 if (invariant_p (x) == 1)
6218 return 0;
6220 /* See if the expression could be a giv and get its form.
6221 Mark our place on the obstack in case we don't find a giv. */
6222 storage = (char *) oballoc (0);
6223 *pbenefit = 0;
6224 x = simplify_giv_expr (x, pbenefit);
6225 if (x == 0)
6227 obfree (storage);
6228 return 0;
6231 switch (GET_CODE (x))
6233 case USE:
6234 case CONST_INT:
6235 /* Since this is now an invariant and wasn't before, it must be a giv
6236 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6237 with. */
6238 *src_reg = loop_iv_list->biv->dest_reg;
6239 *mult_val = const0_rtx;
6240 *add_val = x;
6241 break;
6243 case REG:
6244 /* This is equivalent to a BIV. */
6245 *src_reg = x;
6246 *mult_val = const1_rtx;
6247 *add_val = const0_rtx;
6248 break;
6250 case PLUS:
6251 /* Either (plus (biv) (invar)) or
6252 (plus (mult (biv) (invar_1)) (invar_2)). */
6253 if (GET_CODE (XEXP (x, 0)) == MULT)
6255 *src_reg = XEXP (XEXP (x, 0), 0);
6256 *mult_val = XEXP (XEXP (x, 0), 1);
6258 else
6260 *src_reg = XEXP (x, 0);
6261 *mult_val = const1_rtx;
6263 *add_val = XEXP (x, 1);
6264 break;
6266 case MULT:
6267 /* ADD_VAL is zero. */
6268 *src_reg = XEXP (x, 0);
6269 *mult_val = XEXP (x, 1);
6270 *add_val = const0_rtx;
6271 break;
6273 default:
6274 abort ();
6277 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6278 unless they are CONST_INT). */
6279 if (GET_CODE (*add_val) == USE)
6280 *add_val = XEXP (*add_val, 0);
6281 if (GET_CODE (*mult_val) == USE)
6282 *mult_val = XEXP (*mult_val, 0);
6284 if (is_addr)
6286 #ifdef ADDRESS_COST
6287 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
6288 #else
6289 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
6290 #endif
6292 else
6293 *pbenefit += rtx_cost (orig_x, SET);
6295 /* Always return true if this is a giv so it will be detected as such,
6296 even if the benefit is zero or negative. This allows elimination
6297 of bivs that might otherwise not be eliminated. */
6298 return 1;
6301 /* Given an expression, X, try to form it as a linear function of a biv.
6302 We will canonicalize it to be of the form
6303 (plus (mult (BIV) (invar_1))
6304 (invar_2))
6305 with possible degeneracies.
6307 The invariant expressions must each be of a form that can be used as a
6308 machine operand. We surround them with a USE rtx (a hack, but localized
6309 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6310 routine; it is the caller's responsibility to strip them.
6312 If no such canonicalization is possible (i.e., two biv's are used or an
6313 expression that is neither invariant nor a biv or giv), this routine
6314 returns 0.
6316 For a non-zero return, the result will have a code of CONST_INT, USE,
6317 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6319 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
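/* A sketch of the canonicalization, for an illustrative biv `b':
the expression (b + 4) * 2 - b distributes and collects into
(plus (reg b) (const_int 8)), with the invariant part last. */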
6321 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6322 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6323 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
6324 static int cmp_recombine_givs_stats PARAMS ((const PTR, const PTR));
6326 static rtx
6327 simplify_giv_expr (x, benefit)
6328 rtx x;
6329 int *benefit;
6331 enum machine_mode mode = GET_MODE (x);
6332 rtx arg0, arg1;
6333 rtx tem;
6335 /* If this is not an integer mode, or if we cannot do arithmetic in this
6336 mode, this can't be a giv. */
6337 if (mode != VOIDmode
6338 && (GET_MODE_CLASS (mode) != MODE_INT
6339 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6340 return NULL_RTX;
6342 switch (GET_CODE (x))
6344 case PLUS:
6345 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
6346 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
6347 if (arg0 == 0 || arg1 == 0)
6348 return NULL_RTX;
6350 /* Put constant last, CONST_INT last if both constant. */
6351 if ((GET_CODE (arg0) == USE
6352 || GET_CODE (arg0) == CONST_INT)
6353 && ! ((GET_CODE (arg0) == USE
6354 && GET_CODE (arg1) == USE)
6355 || GET_CODE (arg1) == CONST_INT))
6356 tem = arg0, arg0 = arg1, arg1 = tem;
6358 /* Handle addition of zero, then addition of an invariant. */
6359 if (arg1 == const0_rtx)
6360 return arg0;
6361 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6362 switch (GET_CODE (arg0))
6364 case CONST_INT:
6365 case USE:
6366 /* Adding two invariants must result in an invariant, so enclose
6367 addition operation inside a USE and return it. */
6368 if (GET_CODE (arg0) == USE)
6369 arg0 = XEXP (arg0, 0);
6370 if (GET_CODE (arg1) == USE)
6371 arg1 = XEXP (arg1, 0);
6373 if (GET_CODE (arg0) == CONST_INT)
6374 tem = arg0, arg0 = arg1, arg1 = tem;
6375 if (GET_CODE (arg1) == CONST_INT)
6376 tem = sge_plus_constant (arg0, arg1);
6377 else
6378 tem = sge_plus (mode, arg0, arg1);
6380 if (GET_CODE (tem) != CONST_INT)
6381 tem = gen_rtx_USE (mode, tem);
6382 return tem;
6384 case REG:
6385 case MULT:
6386 /* biv + invar or mult + invar. Return sum. */
6387 return gen_rtx_PLUS (mode, arg0, arg1);
6389 case PLUS:
6390 /* (a + invar_1) + invar_2. Associate. */
6391 return
6392 simplify_giv_expr (gen_rtx_PLUS (mode,
6393 XEXP (arg0, 0),
6394 gen_rtx_PLUS (mode,
6395 XEXP (arg0, 1),
6396 arg1)),
6397 benefit);
6399 default:
6400 abort ();
6403 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6404 MULT to reduce cases. */
6405 if (GET_CODE (arg0) == REG)
6406 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6407 if (GET_CODE (arg1) == REG)
6408 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6410 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6411 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6412 Recurse to associate the second PLUS. */
6413 if (GET_CODE (arg1) == MULT)
6414 tem = arg0, arg0 = arg1, arg1 = tem;
6416 if (GET_CODE (arg1) == PLUS)
6417 return
6418 simplify_giv_expr (gen_rtx_PLUS (mode,
6419 gen_rtx_PLUS (mode, arg0,
6420 XEXP (arg1, 0)),
6421 XEXP (arg1, 1)),
6422 benefit);
6424 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6425 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6426 return NULL_RTX;
6428 if (! rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6429 return NULL_RTX;
6431 return simplify_giv_expr (gen_rtx_MULT (mode,
6432 XEXP (arg0, 0),
6433 gen_rtx_PLUS (mode,
6434 XEXP (arg0, 1),
6435 XEXP (arg1, 1))),
6436 benefit);
6438 case MINUS:
6439 /* Handle "a - b" as "a + b * (-1)". */
6440 return simplify_giv_expr (gen_rtx_PLUS (mode,
6441 XEXP (x, 0),
6442 gen_rtx_MULT (mode,
6443 XEXP (x, 1),
6444 constm1_rtx)),
6445 benefit);
6447 case MULT:
6448 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
6449 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
6450 if (arg0 == 0 || arg1 == 0)
6451 return NULL_RTX;
6453 /* Put constant last, CONST_INT last if both constant. */
6454 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6455 && GET_CODE (arg1) != CONST_INT)
6456 tem = arg0, arg0 = arg1, arg1 = tem;
6458 /* If second argument is not now constant, not giv. */
6459 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6460 return NULL_RTX;
6462 /* Handle multiply by 0 or 1. */
6463 if (arg1 == const0_rtx)
6464 return const0_rtx;
6466 else if (arg1 == const1_rtx)
6467 return arg0;
6469 switch (GET_CODE (arg0))
6471 case REG:
6472 /* biv * invar. Done. */
6473 return gen_rtx_MULT (mode, arg0, arg1);
6475 case CONST_INT:
6476 /* Product of two constants. */
6477 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6479 case USE:
6480 /* invar * invar. It is a giv, but very few of these will
6481 actually pay off, so limit to simple registers. */
6482 if (GET_CODE (arg1) != CONST_INT)
6483 return NULL_RTX;
6485 arg0 = XEXP (arg0, 0);
6486 if (GET_CODE (arg0) == REG)
6487 tem = gen_rtx_MULT (mode, arg0, arg1);
6488 else if (GET_CODE (arg0) == MULT
6489 && GET_CODE (XEXP (arg0, 0)) == REG
6490 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
6492 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
6493 GEN_INT (INTVAL (XEXP (arg0, 1))
6494 * INTVAL (arg1)));
6496 else
6497 return NULL_RTX;
6498 return gen_rtx_USE (mode, tem);
6500 case MULT:
6501 /* (a * invar_1) * invar_2. Associate. */
6502 return simplify_giv_expr (gen_rtx_MULT (mode,
6503 XEXP (arg0, 0),
6504 gen_rtx_MULT (mode,
6505 XEXP (arg0, 1),
6506 arg1)),
6507 benefit);
6509 case PLUS:
6510 /* (a + invar_1) * invar_2. Distribute. */
6511 return simplify_giv_expr (gen_rtx_PLUS (mode,
6512 gen_rtx_MULT (mode,
6513 XEXP (arg0, 0),
6514 arg1),
6515 gen_rtx_MULT (mode,
6516 XEXP (arg0, 1),
6517 arg1)),
6518 benefit);
6520 default:
6521 abort ();
6524 case ASHIFT:
6525 /* Shift by constant is multiply by power of two. */
6526 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6527 return 0;
6529 return
6530 simplify_giv_expr (gen_rtx_MULT (mode,
6531 XEXP (x, 0),
6532 GEN_INT ((HOST_WIDE_INT) 1
6533 << INTVAL (XEXP (x, 1)))),
6534 benefit);
6536 case NEG:
6537 /* "-a" is "a * (-1)" */
6538 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6539 benefit);
6541 case NOT:
6542 /* "~a" is "-a - 1". Silly, but easy. */
6543 return simplify_giv_expr (gen_rtx_MINUS (mode,
6544 gen_rtx_NEG (mode, XEXP (x, 0)),
6545 const1_rtx),
6546 benefit);
6548 case USE:
6549 /* Already in proper form for invariant. */
6550 return x;
6552 case REG:
6553 /* If this is a new register, we can't deal with it. */
6554 if (REGNO (x) >= max_reg_before_loop)
6555 return 0;
6557 /* Check for biv or giv. */
6558 switch (REG_IV_TYPE (REGNO (x)))
6560 case BASIC_INDUCT:
6561 return x;
6562 case GENERAL_INDUCT:
6564 struct induction *v = REG_IV_INFO (REGNO (x));
6566 /* Form expression from giv and add benefit. Ensure this giv
6567 can derive another and subtract any needed adjustment if so. */
6568 *benefit += v->benefit;
6569 if (v->cant_derive)
6570 return 0;
6572 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6573 v->src_reg, v->mult_val),
6574 v->add_val);
6576 if (v->derive_adjustment)
6577 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6578 return simplify_giv_expr (tem, benefit);
6581 default:
6582 /* If it isn't an induction variable, and it is invariant, we
6583 may be able to simplify things further by looking through
6584 the bits we just moved outside the loop. */
6585 if (invariant_p (x) == 1)
6587 struct movable *m;
6589 for (m = the_movables; m ; m = m->next)
6590 if (rtx_equal_p (x, m->set_dest))
6592 /* Ok, we found a match. Substitute and simplify. */
6594 /* If we match another movable, we must use that, as
6595 this one is going away. */
6596 if (m->match)
6597 return simplify_giv_expr (m->match->set_dest, benefit);
6599 /* If consec is non-zero, this is a member of a group of
6600 instructions that were moved together. We handle this
6601 case only to the point of seeking to the last insn and
6602 looking for a REG_EQUAL. Fail if we don't find one. */
6603 if (m->consec != 0)
6605 int i = m->consec;
6606 tem = m->insn;
6607 do { tem = NEXT_INSN (tem); } while (--i > 0);
6609 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6610 if (tem)
6611 tem = XEXP (tem, 0);
6613 else
6615 tem = single_set (m->insn);
6616 if (tem)
6617 tem = SET_SRC (tem);
6620 if (tem)
6622 /* What we are most interested in is pointer
6623 arithmetic on invariants -- only take
6624 patterns we may be able to do something with. */
6625 if (GET_CODE (tem) == PLUS
6626 || GET_CODE (tem) == MULT
6627 || GET_CODE (tem) == ASHIFT
6628 || GET_CODE (tem) == CONST_INT
6629 || GET_CODE (tem) == SYMBOL_REF)
6631 tem = simplify_giv_expr (tem, benefit);
6632 if (tem)
6633 return tem;
6635 else if (GET_CODE (tem) == CONST
6636 && GET_CODE (XEXP (tem, 0)) == PLUS
6637 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6638 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6640 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
6641 if (tem)
6642 return tem;
6645 break;
6648 break;
6651 /* Fall through to general case. */
6652 default:
6653 /* If invariant, return as USE (unless CONST_INT).
6654 Otherwise, not giv. */
6655 if (GET_CODE (x) == USE)
6656 x = XEXP (x, 0);
6658 if (invariant_p (x) == 1)
6660 if (GET_CODE (x) == CONST_INT)
6661 return x;
6662 if (GET_CODE (x) == CONST
6663 && GET_CODE (XEXP (x, 0)) == PLUS
6664 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6665 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6666 x = XEXP (x, 0);
6667 return gen_rtx_USE (mode, x);
6669 else
6670 return 0;
6674 /* This routine folds invariants such that there is only ever one
6675 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6677 static rtx
6678 sge_plus_constant (x, c)
6679 rtx x, c;
6681 if (GET_CODE (x) == CONST_INT)
6682 return GEN_INT (INTVAL (x) + INTVAL (c));
6683 else if (GET_CODE (x) != PLUS)
6684 return gen_rtx_PLUS (GET_MODE (x), x, c);
6685 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6687 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6688 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6690 else if (GET_CODE (XEXP (x, 0)) == PLUS
6691 || GET_CODE (XEXP (x, 1)) != PLUS)
6693 return gen_rtx_PLUS (GET_MODE (x),
6694 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6696 else
6698 return gen_rtx_PLUS (GET_MODE (x),
6699 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6703 static rtx
6704 sge_plus (mode, x, y)
6705 enum machine_mode mode;
6706 rtx x, y;
6708 while (GET_CODE (y) == PLUS)
6710 rtx a = XEXP (y, 0);
6711 if (GET_CODE (a) == CONST_INT)
6712 x = sge_plus_constant (x, a);
6713 else
6714 x = gen_rtx_PLUS (mode, x, a);
6715 y = XEXP (y, 1);
6717 if (GET_CODE (y) == CONST_INT)
6718 x = sge_plus_constant (x, y);
6719 else
6720 x = gen_rtx_PLUS (mode, x, y);
6721 return x;
6724 /* Help detect a giv that is calculated by several consecutive insns;
6725 for example,
6726 giv = biv * M
6727 giv = giv + A
6728 The caller has already identified the first insn P as having a giv as dest;
6729 we check that all other insns that set the same register follow
6730 immediately after P, that they alter nothing else,
6731 and that the result of the last is still a giv.
6733 The value is 0 if the reg set in P is not really a giv.
6734 Otherwise, the value is the amount gained by eliminating
6735 all the consecutive insns that compute the value.
6737 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6738 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6740 The coefficients of the ultimate giv value are stored in
6741 *MULT_VAL and *ADD_VAL. */
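/* For instance (hypothetical), the consecutive insns

g = b * 4;
g = g + a;

with `a' invariant yield a single giv with *MULT_VAL == (const_int 4)
and *ADD_VAL == (reg a). */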
6743 static int
6744 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6745 add_val, mult_val, last_consec_insn)
6746 int first_benefit;
6747 rtx p;
6748 rtx src_reg;
6749 rtx dest_reg;
6750 rtx *add_val;
6751 rtx *mult_val;
6752 rtx *last_consec_insn;
6754 int count;
6755 enum rtx_code code;
6756 int benefit;
6757 rtx temp;
6758 rtx set;
6760 /* Indicate that this is a giv so that we can update the value produced in
6761 each insn of the multi-insn sequence.
6763 This induction structure will be used only by the call to
6764 general_induction_var below, so we can allocate it on our stack.
6765 If this is a giv, our caller will replace the induct var entry with
6766 a new induction structure. */
6767 struct induction *v
6768 = (struct induction *) alloca (sizeof (struct induction));
6769 v->src_reg = src_reg;
6770 v->mult_val = *mult_val;
6771 v->add_val = *add_val;
6772 v->benefit = first_benefit;
6773 v->cant_derive = 0;
6774 v->derive_adjustment = 0;
6776 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
6777 REG_IV_INFO (REGNO (dest_reg)) = v;
6779 count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
6781 while (count > 0)
6783 p = NEXT_INSN (p);
6784 code = GET_CODE (p);
6786 /* If libcall, skip to end of call sequence. */
6787 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6788 p = XEXP (temp, 0);
6790 if (code == INSN
6791 && (set = single_set (p))
6792 && GET_CODE (SET_DEST (set)) == REG
6793 && SET_DEST (set) == dest_reg
6794 && (general_induction_var (SET_SRC (set), &src_reg,
6795 add_val, mult_val, 0, &benefit)
6796 /* Giv created by equivalent expression. */
6797 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6798 && general_induction_var (XEXP (temp, 0), &src_reg,
6799 add_val, mult_val, 0, &benefit)))
6800 && src_reg == v->src_reg)
6802 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6803 benefit += libcall_benefit (p);
6805 count--;
6806 v->mult_val = *mult_val;
6807 v->add_val = *add_val;
6808 v->benefit = benefit;
6810 else if (code != NOTE)
6812 /* Allow insns that set something other than this giv to a
6813 constant. Such insns are needed on machines which cannot
6814 include long constants and should not disqualify a giv. */
6815 if (code == INSN
6816 && (set = single_set (p))
6817 && SET_DEST (set) != dest_reg
6818 && CONSTANT_P (SET_SRC (set)))
6819 continue;
6821 REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT;
6822 return 0;
6826 *last_consec_insn = p;
6827 return v->benefit;
6830 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6831 represented by G1. If no such expression can be found, or it is clear that
6832 it cannot possibly be a valid address, 0 is returned.
6834 To perform the computation, we note that
6835 G1 = x * v + a and
6836 G2 = y * v + b
6837 where `v' is the biv.
6839 So G2 = (y/x) * G1 + (b - a*y/x).
6841 Note that MULT = y/x.
6843 Update: A and B are now allowed to be additive expressions such that
6844 B contains all variables in A. That is, computing B-A will not require
6845 subtracting variables. */
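/* A numeric sketch: with G1 = 2*v + 4 and G2 = 6*v + 20, MULT is
6/2 == 3 and the result expresses G2 as 3*G1 + 8, since
3*(2*v + 4) + 8 == 6*v + 20. */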
6847 static rtx
6848 express_from_1 (a, b, mult)
6849 rtx a, b, mult;
6851 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6853 if (mult == const0_rtx)
6854 return b;
6856 /* If MULT is not 1, we cannot handle A with non-constants, since we
6857 would then be required to subtract multiples of the registers in A.
6858 This is theoretically possible, and may even apply to some Fortran
6859 constructs, but it is a lot of work and we do not attempt it here. */
6861 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6862 return NULL_RTX;
6864 /* In general these structures are sorted top to bottom (down the PLUS
6865 chain), but not left to right across the PLUS. If B is a higher
6866 order giv than A, we can strip one level and recurse. If A is higher
6867 order, we'll eventually bail out, but won't know that until the end.
6868 If they are the same, we'll strip one level around this loop. */
6870 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6872 rtx ra, rb, oa, ob, tmp;
6874 ra = XEXP (a, 0), oa = XEXP (a, 1);
6875 if (GET_CODE (ra) == PLUS)
6876 tmp = ra, ra = oa, oa = tmp;
6878 rb = XEXP (b, 0), ob = XEXP (b, 1);
6879 if (GET_CODE (rb) == PLUS)
6880 tmp = rb, rb = ob, ob = tmp;
6882 if (rtx_equal_p (ra, rb))
6883 /* We matched: remove one reg completely. */
6884 a = oa, b = ob;
6885 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6886 /* An alternate match. */
6887 a = oa, b = rb;
6888 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6889 /* An alternate match. */
6890 a = ra, b = ob;
6891 else
6893 /* Indicates an extra register in B. Strip one level from B and
6894 recurse, hoping B was the higher order expression. */
6895 ob = express_from_1 (a, ob, mult);
6896 if (ob == NULL_RTX)
6897 return NULL_RTX;
6898 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6902 /* Here we are at the last level of A, go through the cases hoping to
6903 get rid of everything but a constant. */
6905 if (GET_CODE (a) == PLUS)
6907 rtx ra, oa;
6909 ra = XEXP (a, 0), oa = XEXP (a, 1);
6910 if (rtx_equal_p (oa, b))
6911 oa = ra;
6912 else if (!rtx_equal_p (ra, b))
6913 return NULL_RTX;
6915 if (GET_CODE (oa) != CONST_INT)
6916 return NULL_RTX;
6918 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6920 else if (GET_CODE (a) == CONST_INT)
6922 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6924 else if (GET_CODE (b) == PLUS)
6926 if (rtx_equal_p (a, XEXP (b, 0)))
6927 return XEXP (b, 1);
6928 else if (rtx_equal_p (a, XEXP (b, 1)))
6929 return XEXP (b, 0);
6930 else
6931 return NULL_RTX;
6933 else if (rtx_equal_p (a, b))
6934 return const0_rtx;
6936 return NULL_RTX;
6939 rtx
6940 express_from (g1, g2)
6941 struct induction *g1, *g2;
6943 rtx mult, add;
6945 /* The value that G1 will be multiplied by must be a constant integer. Also,
6946 the only chance we have of getting a valid address is if b*c/a (see above
6947 for notation) is also an integer. */
6948 if (GET_CODE (g1->mult_val) == CONST_INT
6949 && GET_CODE (g2->mult_val) == CONST_INT)
6951 if (g1->mult_val == const0_rtx
6952 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6953 return NULL_RTX;
6954 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6956 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6957 mult = const1_rtx;
6958 else
6960 /* ??? Find out if one is a multiple of the other? */
6961 return NULL_RTX;
6964 add = express_from_1 (g1->add_val, g2->add_val, mult);
6965 if (add == NULL_RTX)
6967 /* Failed. If we've got a multiplication factor between G1 and G2,
6968 scale G1's addend and try again. */
6969 if (INTVAL (mult) > 1)
6971 rtx g1_add_val = g1->add_val;
6972 if (GET_CODE (g1_add_val) == MULT
6973 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6975 HOST_WIDE_INT m;
6976 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6977 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6978 XEXP (g1_add_val, 0), GEN_INT (m));
6980 else
6982 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6983 mult);
6986 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6989 if (add == NULL_RTX)
6990 return NULL_RTX;
6992 /* Form simplified final result. */
6993 if (mult == const0_rtx)
6994 return add;
6995 else if (mult == const1_rtx)
6996 mult = g1->dest_reg;
6997 else
6998 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7000 if (add == const0_rtx)
7001 return mult;
7002 else
7004 if (GET_CODE (add) == PLUS
7005 && CONSTANT_P (XEXP (add, 1)))
7007 rtx tem = XEXP (add, 1);
7008 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7009 add = tem;
7012 return gen_rtx_PLUS (g2->mode, mult, add);
7017 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7018 represented by G1. This indicates that G2 should be combined with G1 and
7019 that G2 can use (either directly or via an address expression) a register
7020 used to represent G1. */
7022 static rtx
7023 combine_givs_p (g1, g2)
7024 struct induction *g1, *g2;
7026 rtx tem = express_from (g1, g2);
7028 /* If these givs are identical, they can be combined. We use the results
7029 of express_from because the addends are not in a canonical form, so
7030 rtx_equal_p is a weaker test. */
7031 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7032 combination to be the other way round. */
7033 if (tem == g1->dest_reg
7034 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7036 return g1->dest_reg;
7039 /* If G2 can be expressed as a function of G1 and that function is valid
7040 as an address and no more expensive than using a register for G2,
7041 the expression of G2 in terms of G1 can be used. */
7042 if (tem != NULL_RTX
7043 && g2->giv_type == DEST_ADDR
7044 && memory_address_p (g2->mem_mode, tem)
7045 /* ??? Loses, especially with -fforce-addr, where *g2->location
7046 will always be a register, and so anything more complicated
7047 gets discarded. */
7048 #if 0
7049 #ifdef ADDRESS_COST
7050 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7051 #else
7052 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7053 #endif
7054 #endif
7057 return tem;
7060 return NULL_RTX;
7063 struct combine_givs_stats
7065 int giv_number;
7066 int total_benefit;
7069 static int
7070 cmp_combine_givs_stats (xp, yp)
7071 const PTR xp;
7072 const PTR yp;
7074 const struct combine_givs_stats * const x =
7075 (const struct combine_givs_stats *) xp;
7076 const struct combine_givs_stats * const y =
7077 (const struct combine_givs_stats *) yp;
7078 int d;
7079 d = y->total_benefit - x->total_benefit;
7080 /* Stabilize the sort. */
7081 if (!d)
7082 d = x->giv_number - y->giv_number;
7083 return d;
7086 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7087 any other. If so, point SAME to the giv combined with and set NEW_REG to
7088 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7089 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
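/* For example (illustrative), givs p == i*4 and q == i*4 + 4 from biv
`i' can be combined: express_from gives q == p + 4, so q->new_reg
becomes (plus p 4) and q->same points at p. */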
7091 static void
7092 combine_givs (bl)
7093 struct iv_class *bl;
7095 /* Additional benefit to add for being combined multiple times. */
7096 const int extra_benefit = 3;
7098 struct induction *g1, *g2, **giv_array;
7099 int i, j, k, giv_count;
7100 struct combine_givs_stats *stats;
7101 rtx *can_combine;
7103 /* Count givs, because bl->giv_count is incorrect here. */
7104 giv_count = 0;
7105 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7106 if (!g1->ignore)
7107 giv_count++;
7109 giv_array
7110 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7111 i = 0;
7112 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7113 if (!g1->ignore)
7114 giv_array[i++] = g1;
7116 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
7117 bzero ((char *) stats, giv_count * sizeof (*stats));
7119 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof (rtx));
7120 bzero ((char *) can_combine, giv_count * giv_count * sizeof (rtx));
7122 for (i = 0; i < giv_count; i++)
7124 int this_benefit;
7125 rtx single_use;
7127 g1 = giv_array[i];
7128 stats[i].giv_number = i;
7130 /* If a DEST_REG GIV is used only once, do not allow it to combine
7131 with anything, for in doing so we will gain nothing that cannot
7132 be had by simply letting the GIV with which we would have combined
7133 to be reduced on its own. The lossage shows up in particular with
7134 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7135 be seen elsewhere as well. */
7136 if (g1->giv_type == DEST_REG
7137 && (single_use = VARRAY_RTX (reg_single_usage, REGNO (g1->dest_reg)))
7138 && single_use != const0_rtx)
7139 continue;
7141 this_benefit = g1->benefit;
7142 /* Add an additional weight for zero addends. */
7143 if (g1->no_const_addval)
7144 this_benefit += 1;
7146 for (j = 0; j < giv_count; j++)
7148 rtx this_combine;
7150 g2 = giv_array[j];
7151 if (g1 != g2
7152 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7154 can_combine[i*giv_count + j] = this_combine;
7155 this_benefit += g2->benefit + extra_benefit;
7158 stats[i].total_benefit = this_benefit;
7161 /* Iterate, combining until we can't. */
7162 restart:
7163 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7165 if (loop_dump_stream)
7167 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7168 for (k = 0; k < giv_count; k++)
7170 g1 = giv_array[stats[k].giv_number];
7171 if (!g1->combined_with && !g1->same)
7172 fprintf (loop_dump_stream, " {%d, %d}",
7173 INSN_UID (giv_array[stats[k].giv_number]->insn),
7174 stats[k].total_benefit);
7176 putc ('\n', loop_dump_stream);
7179 for (k = 0; k < giv_count; k++)
7181 int g1_add_benefit = 0;
7183 i = stats[k].giv_number;
7184 g1 = giv_array[i];
7186 /* If it has already been combined, skip. */
7187 if (g1->combined_with || g1->same)
7188 continue;
7190 for (j = 0; j < giv_count; j++)
7192 g2 = giv_array[j];
7193 if (g1 != g2 && can_combine[i*giv_count + j]
7194 /* If it has already been combined, skip. */
7195 && ! g2->same && ! g2->combined_with)
7197 int l;
7199 g2->new_reg = can_combine[i*giv_count + j];
7200 g2->same = g1;
7201 g1->combined_with++;
7202 g1->lifetime += g2->lifetime;
7204 g1_add_benefit += g2->benefit;
7206 /* ??? The new final_[bg]iv_value code does a much better job
7207 of finding replaceable giv's, and hence this code may no
7208 longer be necessary. */
7209 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7210 g1_add_benefit -= copy_cost;
7212 /* To help optimize the next set of combinations, remove
7213 this giv from the benefits of other potential mates. */
7214 for (l = 0; l < giv_count; ++l)
7216 int m = stats[l].giv_number;
7217 if (can_combine[m*giv_count + j])
7218 stats[l].total_benefit -= g2->benefit + extra_benefit;
7221 if (loop_dump_stream)
7222 fprintf (loop_dump_stream,
7223 "giv at %d combined with giv at %d\n",
7224 INSN_UID (g2->insn), INSN_UID (g1->insn));
7228 /* To help optimize the next set of combinations, remove
7229 this giv from the benefits of other potential mates. */
7230 if (g1->combined_with)
7232 for (j = 0; j < giv_count; ++j)
7234 int m = stats[j].giv_number;
7235 if (can_combine[m*giv_count + i])
7236 stats[j].total_benefit -= g1->benefit + extra_benefit;
7239 g1->benefit += g1_add_benefit;
7241 /* We've finished with this giv, and everything it touched.
7242 Restart the combination so that proper weights for the
7243 rest of the givs are properly taken into account. */
7244 /* ??? Ideally we would compact the arrays at this point, so
7245 as to not cover old ground. But sanely compacting
7246 can_combine is tricky. */
7247 goto restart;
7252 struct recombine_givs_stats
7254 int giv_number;
7255 int start_luid, end_luid;
7258 /* Used below as comparison function for qsort. We want an ascending luid
7259 when scanning the array starting at the end, thus the arguments are
7260 used in reverse. */
7261 static int
7262 cmp_recombine_givs_stats (xp, yp)
7263 const PTR xp;
7264 const PTR yp;
7266 const struct recombine_givs_stats * const x =
7267 (const struct recombine_givs_stats *) xp;
7268 const struct recombine_givs_stats * const y =
7269 (const struct recombine_givs_stats *) yp;
7270 int d;
7271 d = y->start_luid - x->start_luid;
7272 /* Stabilize the sort. */
7273 if (!d)
7274 d = y->giv_number - x->giv_number;
7275 return d;
7278 /* Scan X, which is a part of INSN, for the end of life of a giv. Also
7280 look for the start of life of a giv whose start has not been seen
7281 yet, so that the search for the end of its life can begin.
7281 Only consider givs that belong to BIV.
7282 Return the total number of lifetime ends that have been found. */
7283 static int
7284 find_life_end (x, stats, insn, biv)
7285 rtx x, insn, biv;
7286 struct recombine_givs_stats *stats;
7288 enum rtx_code code;
7289 const char *fmt;
7290 int i, j;
7291 int retval;
7293 code = GET_CODE (x);
7294 switch (code)
7296 case SET:
7298 rtx reg = SET_DEST (x);
7299 if (GET_CODE (reg) == REG)
7301 int regno = REGNO (reg);
7302 struct induction *v = REG_IV_INFO (regno);
7304 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7305 && ! v->ignore
7306 && v->src_reg == biv
7307 && stats[v->ix].end_luid <= 0)
7309 /* If we see a 0 here for end_luid, it means that we have
7310 scanned the entire loop without finding any use at all.
7311 We must not predicate this code on a start_luid match
7312 since that would make the test fail for givs that have
7313 been hoisted out of inner loops. */
7314 if (stats[v->ix].end_luid == 0)
7316 stats[v->ix].end_luid = stats[v->ix].start_luid;
7317 return 1 + find_life_end (SET_SRC (x), stats, insn, biv);
7319 else if (stats[v->ix].start_luid == INSN_LUID (insn))
7320 stats[v->ix].end_luid = 0;
7322 return find_life_end (SET_SRC (x), stats, insn, biv);
7324 break;
7326 case REG:
7328 int regno = REGNO (x);
7329 struct induction *v = REG_IV_INFO (regno);
7331 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7332 && ! v->ignore
7333 && v->src_reg == biv
7334 && stats[v->ix].end_luid == 0)
7336 while (INSN_UID (insn) >= max_uid_for_loop)
7337 insn = NEXT_INSN (insn);
7338 stats[v->ix].end_luid = INSN_LUID (insn);
7339 return 1;
7341 return 0;
7343 case LABEL_REF:
7344 case CONST_DOUBLE:
7345 case CONST_INT:
7346 case CONST:
7347 return 0;
7348 default:
7349 break;
7351 fmt = GET_RTX_FORMAT (code);
7352 retval = 0;
7353 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7355 if (fmt[i] == 'e')
7356 retval += find_life_end (XEXP (x, i), stats, insn, biv);
7358 else if (fmt[i] == 'E')
7359 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7360 retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv);
7362 return retval;
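/* Illustrative aside: the loop above is the standard rtl-walking
   idiom.  GET_RTX_FORMAT returns a format string for the rtx code --
   e.g. "ee" for PLUS -- in which each 'e' names a subexpression
   operand (recurse via XEXP) and each 'E' a vector of subexpressions
   (recurse over XVECEXP).  The same idiom reappears below in
   update_reg_last_use and maybe_eliminate_biv_1.  */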
7365 /* For each giv that has been combined with another, look if
7366 we can combine it with the most recently used one instead.
7367 This tends to shorten giv lifetimes, and helps the next step:
7368 try to derive givs from other givs. */
7369 static void
7370 recombine_givs (bl, loop_start, loop_end, unroll_p)
7371 struct iv_class *bl;
7372 rtx loop_start, loop_end;
7373 int unroll_p;
7375 struct induction *v, **giv_array, *last_giv;
7376 struct recombine_givs_stats *stats;
7377 int giv_count;
7378 int i, rescan;
7379 int ends_need_computing;
7381 for (giv_count = 0, v = bl->giv; v; v = v->next_iv)
7383 if (! v->ignore)
7384 giv_count++;
7386 giv_array
7387 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7388 stats = (struct recombine_givs_stats *) alloca (giv_count * sizeof *stats);
7390 /* Initialize stats and set up the ix field for each giv in stats to name
7391 the corresponding index into stats. */
7392 for (i = 0, v = bl->giv; v; v = v->next_iv)
7394 rtx p;
7396 if (v->ignore)
7397 continue;
7398 giv_array[i] = v;
7399 stats[i].giv_number = i;
7400 /* If this giv has been hoisted out of an inner loop, use the luid of
7401 the previous insn. */
7402 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
7403 p = PREV_INSN (p);
7404 stats[i].start_luid = INSN_LUID (p);
7405 i++;
7408 qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
7410 /* Set up the ix field for each giv in stats to name
7411 the corresponding index into stats, and
7412 do the actual most-recently-used recombination. */
7413 for (last_giv = 0, i = giv_count - 1; i >= 0; i--)
7415 v = giv_array[stats[i].giv_number];
7416 v->ix = i;
7417 if (v->same)
7419 struct induction *old_same = v->same;
7420 rtx new_combine;
7422 /* combine_givs_p actually says if we can make this transformation.
7423 The other tests are here only to avoid keeping a giv alive
7424 that could otherwise be eliminated. */
7425 if (last_giv
7426 && ((old_same->maybe_dead && ! old_same->combined_with)
7427 || ! last_giv->maybe_dead
7428 || last_giv->combined_with)
7429 && (new_combine = combine_givs_p (last_giv, v)))
7431 old_same->combined_with--;
7432 v->new_reg = new_combine;
7433 v->same = last_giv;
7434 last_giv->combined_with++;
7435 /* No need to update lifetimes / benefits here since we have
7436 already decided what to reduce. */
7438 if (loop_dump_stream)
7440 fprintf (loop_dump_stream,
7441 "giv at %d recombined with giv at %d as ",
7442 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7443 print_rtl (loop_dump_stream, v->new_reg);
7444 putc ('\n', loop_dump_stream);
7446 continue;
7448 v = v->same;
7450 else if (v->giv_type != DEST_REG)
7451 continue;
7452 if (! last_giv
7453 || (last_giv->maybe_dead && ! last_giv->combined_with)
7454 || ! v->maybe_dead
7455 || v->combined_with)
7456 last_giv = v;
7459 ends_need_computing = 0;
7460 /* For each DEST_REG giv, compute lifetime starts, and try to compute
7461 lifetime ends from regscan info. */
7462 for (i = giv_count - 1; i >= 0; i--)
7464 v = giv_array[stats[i].giv_number];
7465 if (v->ignore)
7466 continue;
7467 if (v->giv_type == DEST_ADDR)
7469 /* Loop unrolling of an inner loop can even create new DEST_REG
7470 givs. */
7471 rtx p;
7472 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
7473 p = PREV_INSN (p);
7474 stats[i].start_luid = stats[i].end_luid = INSN_LUID (p);
7475 if (p != v->insn)
7476 stats[i].end_luid++;
7478 else /* v->giv_type == DEST_REG */
7480 if (v->last_use)
7482 stats[i].start_luid = INSN_LUID (v->insn);
7483 stats[i].end_luid = INSN_LUID (v->last_use);
7485 else if (INSN_UID (v->insn) >= max_uid_for_loop)
7487 rtx p;
7488 /* This insn has been created by loop optimization on an inner
7489 loop. We don't have a proper start_luid that will match
7490 when we see the first set. But we do know that there will
7491 be no use before the set, so we can set end_luid to 0 so that
7492 we'll start looking for the last use right away. */
7493 for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; )
7494 p = PREV_INSN (p);
7495 stats[i].start_luid = INSN_LUID (p);
7496 stats[i].end_luid = 0;
7497 ends_need_computing++;
7499 else
7501 int regno = REGNO (v->dest_reg);
7502 int count = VARRAY_INT (n_times_set, regno) - 1;
7503 rtx p = v->insn;
7505 /* Find the first insn that sets the giv, so that we can verify
7506 if this giv's lifetime wraps around the loop. We also need
7507 the luid of the first setting insn in order to detect the
7508 last use properly. */
7509 while (count)
7511 p = prev_nonnote_insn (p);
7512 if (reg_set_p (v->dest_reg, p))
7513 count--;
7516 stats[i].start_luid = INSN_LUID (p);
7517 if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)])
7519 stats[i].end_luid = -1;
7520 ends_need_computing++;
7522 else
7524 stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
7525 if (stats[i].end_luid > INSN_LUID (loop_end))
7527 stats[i].end_luid = -1;
7528 ends_need_computing++;
7535 /* If the regscan information was inconclusive for one or more DEST_REG
7536 givs, scan all insns in the loop to find the lifetime ends. */
7537 if (ends_need_computing)
7539 rtx biv = bl->biv->src_reg;
7540 rtx p = loop_end;
7544 if (p == loop_start)
7545 p = loop_end;
7546 p = PREV_INSN (p);
7547 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
7548 continue;
7549 ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv);
7551 while (ends_need_computing);
7554 /* Set start_luid back to the last insn that sets the giv. This allows
7555 more combinations. */
7556 for (i = giv_count - 1; i >= 0; i--)
7558 v = giv_array[stats[i].giv_number];
7559 if (v->ignore)
7560 continue;
7561 if (INSN_UID (v->insn) < max_uid_for_loop)
7562 stats[i].start_luid = INSN_LUID (v->insn);
7565 /* Now adjust lifetime ends by taking combined givs into account. */
7566 for (i = giv_count - 1; i >= 0; i--)
7568 unsigned luid;
7569 int j;
7571 v = giv_array[stats[i].giv_number];
7572 if (v->ignore)
7573 continue;
7574 if (v->same && ! v->same->ignore)
7576 j = v->same->ix;
7577 luid = stats[i].start_luid;
7578 /* Use unsigned arithmetic to model loop wrap-around. */
7579 if (luid - stats[j].start_luid
7580 > (unsigned) stats[j].end_luid - stats[j].start_luid)
7581 stats[j].end_luid = luid;
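/* Illustrative worked example for the unsigned trick above, assuming
   32-bit luids for concreteness: the test

     (unsigned) (LUID - START) > (unsigned) (END - START)

   is an interval-membership check that still works when a lifetime
   wraps past the loop end.  With START = 10, END = 4 (wrapped) and
   LUID = 7, the left side is 2^32 - 3 and the right side 2^32 - 6,
   so 7 is correctly classified as outside the live range; with
   LUID = 2 the left side is 2^32 - 8, which is smaller, so 2 is
   correctly inside.  */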
7585 qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
7587 /* Try to derive DEST_REG givs from previous DEST_REG givs with the
7588 same mult_val and non-overlapping lifetime. This reduces register
7589 pressure.
7590 Once we find a DEST_REG giv that is suitable to derive others from,
7591 we set last_giv to this giv, and try to derive as many other DEST_REG
7592 givs from it without joining overlapping lifetimes. If we then
7593 encounter a DEST_REG giv that we can't derive, we set rescan to the
7594 index for this giv (unless rescan is already set).
7595 When we are finished with the current LAST_GIV (i.e. the inner loop
7596 terminates), we start again with rescan, which then becomes the new
7597 LAST_GIV. */
7598 for (i = giv_count - 1; i >= 0; i = rescan)
7600 int life_start, life_end;
7602 for (last_giv = 0, rescan = -1; i >= 0; i--)
7604 rtx sum;
7606 v = giv_array[stats[i].giv_number];
7607 if (v->giv_type != DEST_REG || v->derived_from || v->same)
7608 continue;
7609 if (! last_giv)
7611 /* Don't use a giv that's likely to be dead to derive
7612 others - that would be likely to keep that giv alive. */
7613 if (! v->maybe_dead || v->combined_with)
7615 last_giv = v;
7616 life_start = stats[i].start_luid;
7617 life_end = stats[i].end_luid;
7619 continue;
7621 /* Use unsigned arithmetic to model loop wrap around. */
7622 if (((unsigned) stats[i].start_luid - life_start
7623 >= (unsigned) life_end - life_start)
7624 && ((unsigned) stats[i].end_luid - life_start
7625 > (unsigned) life_end - life_start)
7626 /* Check that the giv insn we're about to use for deriving
7627 precedes all uses of that giv. Note that initializing the
7628 derived giv would defeat the purpose of reducing register
7629 pressure.
7630 ??? We could arrange to move the insn. */
7631 && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start)
7632 > (unsigned) stats[i].start_luid - INSN_LUID (loop_start))
7633 && rtx_equal_p (last_giv->mult_val, v->mult_val)
7634 /* ??? Could handle libcalls, but would need more logic. */
7635 && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
7636 /* We would really like to know if for any giv that v
7637 is combined with, v->insn or any intervening biv increment
7638 dominates that combined giv. However, we
7639 don't have this detailed control flow information.
7640 N.B. since last_giv will be reduced, it is valid
7641 anywhere in the loop, so we don't need to check the
7642 validity of last_giv.
7643 We rely here on the fact that v->always_executed implies that
7644 there is no jump to someplace else in the loop before the
7645 giv insn, and hence any insn that is executed before the
7646 giv insn in the loop will have a lower luid. */
7647 && (v->always_executed || ! v->combined_with)
7648 && (sum = express_from (last_giv, v))
7649 /* Make sure we don't make the add more expensive. ADD_COST
7650 doesn't take different costs of registers and constants into
7651 account, so compare the cost of the actual SET_SRCs. */
7652 && (rtx_cost (sum, SET)
7653 <= rtx_cost (SET_SRC (single_set (v->insn)), SET))
7654 /* ??? unroll can't understand anything but reg + const_int
7655 sums. It would be cleaner to fix unroll. */
7656 && ((GET_CODE (sum) == PLUS
7657 && GET_CODE (XEXP (sum, 0)) == REG
7658 && GET_CODE (XEXP (sum, 1)) == CONST_INT)
7659 || ! unroll_p)
7660 && validate_change (v->insn, &PATTERN (v->insn),
7661 gen_rtx_SET (VOIDmode, v->dest_reg, sum), 0))
7663 v->derived_from = last_giv;
7664 life_end = stats[i].end_luid;
7666 if (loop_dump_stream)
7668 fprintf (loop_dump_stream,
7669 "giv at %d derived from %d as ",
7670 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7671 print_rtl (loop_dump_stream, sum);
7672 putc ('\n', loop_dump_stream);
7675 else if (rescan < 0)
7676 rescan = i;
7681 /* EMIT code before INSERT_BEFORE to set REG = B * M + A. */
7683 void
7684 emit_iv_add_mult (b, m, a, reg, insert_before)
7685 rtx b; /* initial value of basic induction variable */
7686 rtx m; /* multiplicative constant */
7687 rtx a; /* additive constant */
7688 rtx reg; /* destination register */
7689 rtx insert_before;
7691 rtx seq;
7692 rtx result;
7694 /* Prevent unexpected sharing of these rtx. */
7695 a = copy_rtx (a);
7696 b = copy_rtx (b);
7698 /* Increase the lifetime of any invariants moved further in code. */
7699 update_reg_last_use (a, insert_before);
7700 update_reg_last_use (b, insert_before);
7701 update_reg_last_use (m, insert_before);
7703 start_sequence ();
7704 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
7705 if (reg != result)
7706 emit_move_insn (reg, result);
7707 seq = gen_sequence ();
7708 end_sequence ();
7710 emit_insn_before (seq, insert_before);
7712 /* It is entirely possible that the expansion created lots of new
7713 registers. Iterate over the sequence we just created and
7714 record them all. */
7716 if (GET_CODE (seq) == SEQUENCE)
7718 int i;
7719 for (i = 0; i < XVECLEN (seq, 0); ++i)
7721 rtx set = single_set (XVECEXP (seq, 0, i));
7722 if (set && GET_CODE (SET_DEST (set)) == REG)
7723 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7726 else if (GET_CODE (seq) == SET
7727 && GET_CODE (SET_DEST (seq)) == REG)
7728 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
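/* Illustrative usage sketch: a caller that wants a loop-invariant
   register holding B * M + A in front of some insn writes

     rtx tem = gen_reg_rtx (mode);
     emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);

   which is exactly how maybe_eliminate_biv_1 below materializes a
   giv's value for use in a comparison.  */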
7731 /* Test whether A * B can be computed without
7732 an actual multiply insn. Value is 1 if so. */
7734 static int
7735 product_cheap_p (a, b)
7736 rtx a;
7737 rtx b;
7739 int i;
7740 rtx tmp;
7741 struct obstack *old_rtl_obstack = rtl_obstack;
7742 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
7743 int win = 1;
7745 /* If only one is constant, make it B. */
7746 if (GET_CODE (a) == CONST_INT)
7747 tmp = a, a = b, b = tmp;
7749 /* If the first is constant, both are constant, so we don't need a multiply. */
7750 if (GET_CODE (a) == CONST_INT)
7751 return 1;
7753 /* If the second is not constant, neither is, so we would need a multiply. */
7754 if (GET_CODE (b) != CONST_INT)
7755 return 0;
7757 /* One operand is constant, so we might not need a multiply insn. Generate
7758 the code for the multiply and see whether a call, a multiply insn, or a
7759 long sequence of insns is generated. */
7761 rtl_obstack = &temp_obstack;
7762 start_sequence ();
7763 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
7764 tmp = gen_sequence ();
7765 end_sequence ();
7767 if (GET_CODE (tmp) == SEQUENCE)
7769 if (XVEC (tmp, 0) == 0)
7770 win = 1;
7771 else if (XVECLEN (tmp, 0) > 3)
7772 win = 0;
7773 else
7774 for (i = 0; i < XVECLEN (tmp, 0); i++)
7776 rtx insn = XVECEXP (tmp, 0, i);
7778 if (GET_CODE (insn) != INSN
7779 || (GET_CODE (PATTERN (insn)) == SET
7780 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7781 || (GET_CODE (PATTERN (insn)) == PARALLEL
7782 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7783 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7785 win = 0;
7786 break;
7790 else if (GET_CODE (tmp) == SET
7791 && GET_CODE (SET_SRC (tmp)) == MULT)
7792 win = 0;
7793 else if (GET_CODE (tmp) == PARALLEL
7794 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7795 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7796 win = 0;
7798 /* Free any storage we obtained in generating this multiply and restore rtl
7799 allocation to its normal obstack. */
7800 obstack_free (&temp_obstack, storage);
7801 rtl_obstack = old_rtl_obstack;
7803 return win;
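/* Illustrative worked example: a multiply by a suitable constant often
   expands into a short shift/add sequence, e.g. for x * 5:

     t = x << 2;	(x * 4)
     t = t + x;		(x * 5)

   -- two cheap insns containing no MULT, so product_cheap_p returns 1.
   A "hard" constant or a libcall would leave a MULT or a long sequence
   behind, and the checks above then return 0.  */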
7806 /* Check to see if loop can be terminated by a "decrement and branch until
7807 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7808 Also try reversing an increment loop to a decrement loop
7809 to see if the optimization can be performed.
7810 Value is nonzero if optimization was performed. */
7812 /* This is useful even if the architecture doesn't have such an insn,
7813 because it might change a loop which increments from 0 to n to a loop
7814 which decrements from n to 0. A loop that decrements to zero is usually
7815 faster than one that increments from zero. */
7817 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7818 such as approx_final_value, biv_total_increment, loop_iterations, and
7819 final_[bg]iv_value. */
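/* Illustrative sketch: at the C level, the reversal performed here
   corresponds to rewriting

     for (i = 0; i < n; i++)	=>	for (i = n; i > 0; i--)
       body_not_using_i ();		  body_not_using_i ();

   which is only valid because the body never reads I; the decrementing
   form can exit on a compare against zero, which many targets fold
   into a single decrement-and-branch instruction.  */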
7821 static int
7822 check_dbra_loop (loop_end, insn_count, loop_start, loop_info)
7823 rtx loop_end;
7824 int insn_count;
7825 rtx loop_start;
7826 struct loop_info *loop_info;
7828 struct iv_class *bl;
7829 rtx reg;
7830 rtx jump_label;
7831 rtx final_value;
7832 rtx start_value;
7833 rtx new_add_val;
7834 rtx comparison;
7835 rtx before_comparison;
7836 rtx p;
7837 rtx jump;
7838 rtx first_compare;
7839 int compare_and_branch;
7841 /* If last insn is a conditional branch, and the insn before tests a
7842 register value, try to optimize it. Otherwise, we can't do anything. */
7844 jump = PREV_INSN (loop_end);
7845 comparison = get_condition_for_loop (jump);
7846 if (comparison == 0)
7847 return 0;
7849 /* Try to compute whether the compare/branch at the loop end is one or
7850 two instructions. */
7851 get_condition (jump, &first_compare);
7852 if (first_compare == jump)
7853 compare_and_branch = 1;
7854 else if (first_compare == prev_nonnote_insn (jump))
7855 compare_and_branch = 2;
7856 else
7857 return 0;
7859 /* Check all of the bivs to see if the compare uses one of them.
7860 Skip bivs set more than once because we can't guarantee that
7861 it will be zero on the last iteration. Also skip if the biv is
7862 used between its update and the test insn. */
7864 for (bl = loop_iv_list; bl; bl = bl->next)
7866 if (bl->biv_count == 1
7867 && bl->biv->dest_reg == XEXP (comparison, 0)
7868 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7869 first_compare))
7870 break;
7873 if (! bl)
7874 return 0;
7876 /* Look for the case where the basic induction variable is always
7877 nonnegative, and equals zero on the last iteration.
7878 In this case, add a reg_note REG_NONNEG, which allows the
7879 m68k DBRA instruction to be used. */
7881 if (((GET_CODE (comparison) == GT
7882 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7883 && INTVAL (XEXP (comparison, 1)) == -1)
7884 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7885 && GET_CODE (bl->biv->add_val) == CONST_INT
7886 && INTVAL (bl->biv->add_val) < 0)
7888 /* Initial value must be greater than 0, and init_val % -dec_value == 0
7889 must hold, to ensure that the biv equals zero on the last
7890 iteration. */
7892 if (GET_CODE (bl->initial_value) == CONST_INT
7893 && INTVAL (bl->initial_value) > 0
7894 && (INTVAL (bl->initial_value)
7895 % (-INTVAL (bl->biv->add_val))) == 0)
7897 /* register always nonnegative, add REG_NOTE to branch */
7898 REG_NOTES (PREV_INSN (loop_end))
7899 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7900 REG_NOTES (PREV_INSN (loop_end)));
7901 bl->nonneg = 1;
7903 return 1;
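/* Illustrative worked example: with bl->initial_value == 12 and
   bl->biv->add_val == -4, the test above accepts because 12 > 0 and
   12 % 4 == 0; the biv then takes the values 12, 8, 4, 0, so it stays
   nonnegative throughout and is exactly zero on the last iteration.  */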
7906 /* If the decrement is 1 and the value was tested as >= 0 before
7907 the loop, then we can safely optimize. */
7908 for (p = loop_start; p; p = PREV_INSN (p))
7910 if (GET_CODE (p) == CODE_LABEL)
7911 break;
7912 if (GET_CODE (p) != JUMP_INSN)
7913 continue;
7915 before_comparison = get_condition_for_loop (p);
7916 if (before_comparison
7917 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7918 && GET_CODE (before_comparison) == LT
7919 && XEXP (before_comparison, 1) == const0_rtx
7920 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7921 && INTVAL (bl->biv->add_val) == -1)
7923 REG_NOTES (PREV_INSN (loop_end))
7924 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7925 REG_NOTES (PREV_INSN (loop_end)));
7926 bl->nonneg = 1;
7928 return 1;
7932 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7933 && INTVAL (bl->biv->add_val) > 0)
7935 /* Try to change inc to dec, so can apply above optimization. */
7936 /* Can do this if:
7937 all registers modified are induction variables or invariant,
7938 all memory references have non-overlapping addresses
7939 (obviously true if only one write)
7940 allow 2 insns for the compare/jump at the end of the loop. */
7941 /* Also, we must avoid any instructions which use both the reversed
7942 biv and another biv. Such instructions will fail if the loop is
7943 reversed. We meet this condition by requiring that either
7944 no_use_except_counting is true, or else that there is only
7945 one biv. */
7946 int num_nonfixed_reads = 0;
7947 /* 1 if the iteration var is used only to count iterations. */
7948 int no_use_except_counting = 0;
7949 /* 1 if the loop has no memory store, or it has a single memory store
7950 which is reversible. */
7951 int reversible_mem_store = 1;
7953 if (bl->giv_count == 0
7954 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
7956 rtx bivreg = regno_reg_rtx[bl->regno];
7958 /* If there are no givs for this biv, and the only exit is the
7959 fall through at the end of the loop, then
7960 see if perhaps there are no uses except to count. */
7961 no_use_except_counting = 1;
7962 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7963 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
7965 rtx set = single_set (p);
7967 if (set && GET_CODE (SET_DEST (set)) == REG
7968 && REGNO (SET_DEST (set)) == bl->regno)
7969 /* An insn that sets the biv is okay. */
7971 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7972 || p == prev_nonnote_insn (loop_end))
7973 && reg_mentioned_p (bivreg, PATTERN (p)))
7975 /* If either of these insns uses the biv and sets a pseudo
7976 that has more than one usage, then the biv has uses
7977 other than counting since it's used to derive a value
7978 that is used more than one time. */
7979 int note_set_pseudo_multiple_uses_retval = 0;
7980 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7981 &note_set_pseudo_multiple_uses_retval);
7982 if (note_set_pseudo_multiple_uses_retval)
7984 no_use_except_counting = 0;
7985 break;
7988 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7990 no_use_except_counting = 0;
7991 break;
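/* Illustrative aside: note_stores walks every store in PATTERN (p)
   and applies the given callback to each destination;
   note_set_pseudo_multiple_uses reports back through the DATA
   pointer (here &note_set_pseudo_multiple_uses_retval) when the
   destination is a pseudo with more than one use.  That is how the
   scan above notices that the biv feeds a value with non-counting
   uses.  */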
7996 if (no_use_except_counting)
7997 ; /* no need to worry about MEMs. */
7998 else if (num_mem_sets <= 1)
8000 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8001 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
8002 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
8004 /* If the loop has a single store, and the destination address is
8005 invariant, then we can't reverse the loop, because this address
8006 might then have the wrong value at loop exit.
8007 This would work if the source were invariant also; however, in that
8008 case, the insn should have been moved out of the loop. */
8010 if (num_mem_sets == 1)
8012 struct induction *v;
8014 reversible_mem_store
8015 = (! unknown_address_altered
8016 && ! invariant_p (XEXP (XEXP (loop_store_mems, 0), 0)));
8018 /* If the store depends on a register that is set after the
8019 store, it depends on the initial value, and is thus not
8020 reversible. */
8021 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8023 if (v->giv_type == DEST_REG
8024 && reg_mentioned_p (v->dest_reg,
8025 XEXP (loop_store_mems, 0))
8026 && loop_insn_first_p (first_loop_store_insn, v->insn))
8027 reversible_mem_store = 0;
8031 else
8032 return 0;
8034 /* This code only acts for innermost loops. Also it simplifies
8035 the memory address check by only reversing loops with
8036 zero or one memory access.
8037 Two memory accesses could involve parts of the same array,
8038 and that can't be reversed.
8039 If the biv is used only for counting, then we don't need to worry
8040 about all these things. */
8042 if ((num_nonfixed_reads <= 1
8043 && ! loop_info->has_call
8044 && ! loop_info->has_volatile
8045 && reversible_mem_store
8046 && (bl->giv_count + bl->biv_count + num_mem_sets
8047 + num_movables + compare_and_branch == insn_count)
8048 && (bl == loop_iv_list && bl->next == 0))
8049 || no_use_except_counting)
8051 rtx tem;
8053 /* Loop can be reversed. */
8054 if (loop_dump_stream)
8055 fprintf (loop_dump_stream, "Can reverse loop\n");
8057 /* Now check other conditions:
8059 The increment must be a constant, as must the initial value,
8060 and the comparison code must be LT.
8062 This test can probably be improved since +/- 1 in the constant
8063 can be obtained by changing LT to LE and vice versa; this is
8064 confusing. */
8066 if (comparison
8067 /* for constants, LE gets turned into LT */
8068 && (GET_CODE (comparison) == LT
8069 || (GET_CODE (comparison) == LE
8070 && no_use_except_counting)))
8072 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8073 rtx initial_value, comparison_value;
8074 int nonneg = 0;
8075 enum rtx_code cmp_code;
8076 int comparison_const_width;
8077 unsigned HOST_WIDE_INT comparison_sign_mask;
8079 add_val = INTVAL (bl->biv->add_val);
8080 comparison_value = XEXP (comparison, 1);
8081 if (GET_MODE (comparison_value) == VOIDmode)
8082 comparison_const_width
8083 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8084 else
8085 comparison_const_width
8086 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8087 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8088 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8089 comparison_sign_mask
8090 = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
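/* Illustrative aside: COMPARISON_SIGN_MASK isolates the sign bit at
   the precision of the comparison.  It is used further down to verify
   that, after COMPARISON_VAL has been rounded up to a multiple of the
   increment, the adjusted value still has the same sign as the
   original comparison_value, i.e. that the normalization did not
   overflow.  */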
8092 /* If the comparison value is not a loop invariant, then we
8093 can not reverse this loop.
8095 ??? If the insns which initialize the comparison value as
8096 a whole compute an invariant result, then we could move
8097 them out of the loop and proceed with loop reversal. */
8098 if (!invariant_p (comparison_value))
8099 return 0;
8101 if (GET_CODE (comparison_value) == CONST_INT)
8102 comparison_val = INTVAL (comparison_value);
8103 initial_value = bl->initial_value;
8105 /* Normalize the initial value if it is an integer and
8106 has no other use except as a counter. This will allow
8107 a few more loops to be reversed. */
8108 if (no_use_except_counting
8109 && GET_CODE (comparison_value) == CONST_INT
8110 && GET_CODE (initial_value) == CONST_INT)
8112 comparison_val = comparison_val - INTVAL (bl->initial_value);
8113 /* The code below requires comparison_val to be a multiple
8114 of add_val in order to do the loop reversal, so
8115 round up comparison_val to a multiple of add_val.
8116 Since comparison_value is constant, we know that the
8117 current comparison code is LT. */
8118 comparison_val = comparison_val + add_val - 1;
8119 comparison_val
8120 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8121 /* We postpone overflow checks for COMPARISON_VAL here;
8122 even if there is an overflow, we might still be able to
8123 reverse the loop, if converting the loop exit test to
8124 NE is possible. */
8125 initial_value = const0_rtx;
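/* Illustrative worked example: for a loop equivalent to
   "for (i = 3; i < 14; i += 4)" we get comparison_val = 14 - 3 = 11,
   then 11 + (4 - 1) = 14 and 14 - 14 % 4 = 12.  The normalized loop
   counts a fresh counter from 0 up to 12 in steps of 4 -- the same
   three iterations -- which the code below can then reverse into a
   counter that decrements toward zero.  */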
8128 /* First check if we can do a vanilla loop reversal. */
8129 if (initial_value == const0_rtx
8130 /* If we have a decrement_and_branch_on_count,
8131 prefer the NE test, since this will allow that
8132 instruction to be generated. Note that we must
8133 use a vanilla loop reversal if the biv is used to
8134 calculate a giv or has a non-counting use. */
8135 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8136 && defined (HAVE_decrement_and_branch_on_count)
8137 && (! (add_val == 1 && loop_info->vtop
8138 && (bl->biv_count == 0
8139 || no_use_except_counting)))
8140 #endif
8141 && GET_CODE (comparison_value) == CONST_INT
8142 /* Now do postponed overflow checks on COMPARISON_VAL. */
8143 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8144 & comparison_sign_mask))
8146 /* Register will always be nonnegative, with value
8147 0 on last iteration */
8148 add_adjust = add_val;
8149 nonneg = 1;
8150 cmp_code = GE;
8152 else if (add_val == 1 && loop_info->vtop
8153 && (bl->biv_count == 0
8154 || no_use_except_counting))
8156 add_adjust = 0;
8157 cmp_code = NE;
8159 else
8160 return 0;
8162 if (GET_CODE (comparison) == LE)
8163 add_adjust -= add_val;
8165 /* If the initial value is not zero, or if the comparison
8166 value is not an exact multiple of the increment, then we
8167 can not reverse this loop. */
8168 if (initial_value == const0_rtx
8169 && GET_CODE (comparison_value) == CONST_INT)
8171 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8172 return 0;
8174 else
8176 if (! no_use_except_counting || add_val != 1)
8177 return 0;
8180 final_value = comparison_value;
8182 /* Reset these in case we normalized the initial value
8183 and comparison value above. */
8184 if (GET_CODE (comparison_value) == CONST_INT
8185 && GET_CODE (initial_value) == CONST_INT)
8187 comparison_value = GEN_INT (comparison_val);
8188 final_value
8189 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8191 bl->initial_value = initial_value;
8193 /* Save some info needed to produce the new insns. */
8194 reg = bl->biv->dest_reg;
8195 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
8196 if (jump_label == pc_rtx)
8197 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
8198 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
8200 /* Set start_value; if this is not a CONST_INT, we need
8201 to generate a SUB.
8202 Initialize biv to start_value before loop start.
8203 The old initializing insn will be deleted as a
8204 dead store by flow.c. */
8205 if (initial_value == const0_rtx
8206 && GET_CODE (comparison_value) == CONST_INT)
8208 start_value = GEN_INT (comparison_val - add_adjust);
8209 emit_insn_before (gen_move_insn (reg, start_value),
8210 loop_start);
8212 else if (GET_CODE (initial_value) == CONST_INT)
8214 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8215 enum machine_mode mode = GET_MODE (reg);
8216 enum insn_code icode
8217 = add_optab->handlers[(int) mode].insn_code;
8219 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8220 || ! ((*insn_data[icode].operand[1].predicate)
8221 (comparison_value, mode))
8222 || ! ((*insn_data[icode].operand[2].predicate)
8223 (offset, mode)))
8224 return 0;
8225 start_value
8226 = gen_rtx_PLUS (mode, comparison_value, offset);
8227 emit_insn_before ((GEN_FCN (icode)
8228 (reg, comparison_value, offset)),
8229 loop_start);
8230 if (GET_CODE (comparison) == LE)
8231 final_value = gen_rtx_PLUS (mode, comparison_value,
8232 GEN_INT (add_val));
8234 else if (! add_adjust)
8236 enum machine_mode mode = GET_MODE (reg);
8237 enum insn_code icode
8238 = sub_optab->handlers[(int) mode].insn_code;
8239 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8240 || ! ((*insn_data[icode].operand[1].predicate)
8241 (comparison_value, mode))
8242 || ! ((*insn_data[icode].operand[2].predicate)
8243 (initial_value, mode)))
8244 return 0;
8245 start_value
8246 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8247 emit_insn_before ((GEN_FCN (icode)
8248 (reg, comparison_value, initial_value)),
8249 loop_start);
8251 else
8252 /* We could handle the other cases too, but it'll be
8253 better to have a testcase first. */
8254 return 0;
8256 /* We may not have a single insn which can increment a reg, so
8257 create a sequence to hold all the insns from expand_inc. */
8258 start_sequence ();
8259 expand_inc (reg, new_add_val);
8260 tem = gen_sequence ();
8261 end_sequence ();
8263 p = emit_insn_before (tem, bl->biv->insn);
8264 delete_insn (bl->biv->insn);
8266 /* Update biv info to reflect its new status. */
8267 bl->biv->insn = p;
8268 bl->initial_value = start_value;
8269 bl->biv->add_val = new_add_val;
8271 /* Update loop info. */
8272 loop_info->initial_value = reg;
8273 loop_info->initial_equiv_value = reg;
8274 loop_info->final_value = const0_rtx;
8275 loop_info->final_equiv_value = const0_rtx;
8276 loop_info->comparison_value = const0_rtx;
8277 loop_info->comparison_code = cmp_code;
8278 loop_info->increment = new_add_val;
8280 /* Inc LABEL_NUSES so that delete_insn will
8281 not delete the label. */
8282 LABEL_NUSES (XEXP (jump_label, 0)) ++;
8284 /* Emit an insn after the end of the loop to set the biv's
8285 proper exit value if it is used anywhere outside the loop. */
8286 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8287 || ! bl->init_insn
8288 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8289 emit_insn_after (gen_move_insn (reg, final_value),
8290 loop_end);
8292 /* Delete compare/branch at end of loop. */
8293 delete_insn (PREV_INSN (loop_end));
8294 if (compare_and_branch == 2)
8295 delete_insn (first_compare);
8297 /* Add new compare/branch insn at end of loop. */
8298 start_sequence ();
8299 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8300 GET_MODE (reg), 0, 0,
8301 XEXP (jump_label, 0));
8302 tem = gen_sequence ();
8303 end_sequence ();
8304 emit_jump_insn_before (tem, loop_end);
8306 for (tem = PREV_INSN (loop_end);
8307 tem && GET_CODE (tem) != JUMP_INSN;
8308 tem = PREV_INSN (tem))
8311 if (tem)
8312 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8314 if (nonneg)
8316 if (tem)
8318 /* Increment of LABEL_NUSES done above. */
8319 /* Register is now always nonnegative,
8320 so add REG_NONNEG note to the branch. */
8321 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
8322 REG_NOTES (tem));
8324 bl->nonneg = 1;
8327 /* No insn may reference both the reversed and another biv or it
8328 will fail (see comment near the top of the loop reversal
8329 code).
8330 Earlier on, we have verified that the biv has no use except
8331 counting, or it is the only biv in this function.
8332 However, the code that computes no_use_except_counting does
8333 not verify reg notes. It's possible to have an insn that
8334 references another biv, and has a REG_EQUAL note with an
8335 expression based on the reversed biv. To avoid this case,
8336 remove all REG_EQUAL notes based on the reversed biv
8337 here. */
8338 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8339 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
8341 rtx *pnote;
8342 rtx set = single_set (p);
8343 /* If this is a set of a GIV based on the reversed biv, any
8344 REG_EQUAL notes should still be correct. */
8345 if (! set
8346 || GET_CODE (SET_DEST (set)) != REG
8347 || (size_t) REGNO (SET_DEST (set)) >= reg_iv_type->num_elements
8348 || REG_IV_TYPE (REGNO (SET_DEST (set))) != GENERAL_INDUCT
8349 || REG_IV_INFO (REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8350 for (pnote = &REG_NOTES (p); *pnote;)
8352 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8353 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8354 XEXP (*pnote, 0)))
8355 *pnote = XEXP (*pnote, 1);
8356 else
8357 pnote = &XEXP (*pnote, 1);
8361 /* Mark that this biv has been reversed. Each giv which depends
8362 on this biv, and which is also live past the end of the loop
8363 will have to be fixed up. */
8365 bl->reversed = 1;
8367 if (loop_dump_stream)
8369 fprintf (loop_dump_stream, "Reversed loop");
8370 if (bl->nonneg)
8371 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8372 else
8373 fprintf (loop_dump_stream, "\n");
8376 return 1;
8381 return 0;
8384 /* Verify whether the biv BL appears to be eliminable,
8385 based on the insns in the loop that refer to it.
8386 LOOP_START is the first insn of the loop, and END is the end insn.
8388 If ELIMINATE_P is non-zero, actually do the elimination.
8390 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8391 determine whether invariant insns should be placed inside or at the
8392 start of the loop. */
8394 static int
8395 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
8396 struct iv_class *bl;
8397 rtx loop_start;
8398 rtx end;
8399 int eliminate_p;
8400 int threshold, insn_count;
8402 rtx reg = bl->biv->dest_reg;
8403 rtx p;
8405 /* Scan all insns in the loop, stopping if we find one that uses the
8406 biv in a way that we cannot eliminate. */
8408 for (p = loop_start; p != end; p = NEXT_INSN (p))
8410 enum rtx_code code = GET_CODE (p);
8411 rtx where = threshold >= insn_count ? loop_start : p;
8413 /* If this is a libcall that sets a giv, skip ahead to its end. */
8414 if (GET_RTX_CLASS (code) == 'i')
8416 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8418 if (note)
8420 rtx last = XEXP (note, 0);
8421 rtx set = single_set (last);
8423 if (set && GET_CODE (SET_DEST (set)) == REG)
8425 int regno = REGNO (SET_DEST (set));
8427 if (regno < max_reg_before_loop
8428 && REG_IV_TYPE (regno) == GENERAL_INDUCT
8429 && REG_IV_INFO (regno)->src_reg == bl->biv->src_reg)
8430 p = last;
8434 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8435 && reg_mentioned_p (reg, PATTERN (p))
8436 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
8438 if (loop_dump_stream)
8439 fprintf (loop_dump_stream,
8440 "Cannot eliminate biv %d: biv used in insn %d.\n",
8441 bl->regno, INSN_UID (p));
8442 break;
8446 if (p == end)
8448 if (loop_dump_stream)
8449 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8450 bl->regno, eliminate_p ? "was" : "can be");
8451 return 1;
8454 return 0;
8457 /* INSN and REFERENCE are instructions in the same insn chain.
8458 Return non-zero if INSN is first. */
8460 int
8461 loop_insn_first_p (insn, reference)
8462 rtx insn, reference;
8464 rtx p, q;
8466 for (p = insn, q = reference; ;)
8468 /* Start with test for not first so that INSN == REFERENCE yields not
8469 first. */
8470 if (q == insn || ! p)
8471 return 0;
8472 if (p == reference || ! q)
8473 return 1;
8475 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8476 previous insn, hence the <= comparison below does not work if
8477 P is a note. */
8478 if (INSN_UID (p) < max_uid_for_loop
8479 && INSN_UID (q) < max_uid_for_loop
8480 && GET_CODE (p) != NOTE)
8481 return INSN_LUID (p) <= INSN_LUID (q);
8483 if (INSN_UID (p) >= max_uid_for_loop
8484 || GET_CODE (p) == NOTE)
8485 p = NEXT_INSN (p);
8486 if (INSN_UID (q) >= max_uid_for_loop)
8487 q = NEXT_INSN (q);
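/* Illustrative worked example: let the insn chain be A -> B -> C,
   where C was emitted by the loop optimizer itself, so
   INSN_UID (C) >= max_uid_for_loop and C has no valid luid.  For
   loop_insn_first_p (A, C) the luid comparison cannot be applied to
   C, so Q is stepped forward from C; it runs off the end of the
   chain, Q becomes zero, and the "p == reference || ! q" test then
   correctly reports that A comes first.  */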
8491 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8492 the offset that we have to take into account due to auto-increment /
8493 giv derivation is zero. */
8494 static int
8495 biv_elimination_giv_has_0_offset (biv, giv, insn)
8496 struct induction *biv, *giv;
8497 rtx insn;
8499 /* If the giv V had the auto-inc address optimization applied
8500 to it, and INSN occurs between the giv insn and the biv
8501 insn, then we'd have to adjust the value used here.
8502 This is rare, so we don't bother to make this possible. */
8503 if (giv->auto_inc_opt
8504 && ((loop_insn_first_p (giv->insn, insn)
8505 && loop_insn_first_p (insn, biv->insn))
8506 || (loop_insn_first_p (biv->insn, insn)
8507 && loop_insn_first_p (insn, giv->insn))))
8508 return 0;
8510 /* If the giv V was derived from another giv, and INSN does
8511 not occur between the giv insn and the biv insn, then we'd
8512 have to adjust the value used here. This is rare, so we don't
8513 bother to make this possible. */
8514 if (giv->derived_from
8515 && ! (giv->always_executed
8516 && loop_insn_first_p (giv->insn, insn)
8517 && loop_insn_first_p (insn, biv->insn)))
8518 return 0;
8519 if (giv->same
8520 && giv->same->derived_from
8521 && ! (giv->same->always_executed
8522 && loop_insn_first_p (giv->same->insn, insn)
8523 && loop_insn_first_p (insn, biv->insn)))
8524 return 0;
8526 return 1;
8529 /* If BL appears in X (part of the pattern of INSN), see if we can
8530 eliminate its use. If so, return 1. If not, return 0.
8532 If BIV does not appear in X, return 1.
8534 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
8535 where extra insns should be added. Depending on how many items have been
8536 moved out of the loop, it will either be before INSN or at the start of
8537 the loop. */
8539 static int
8540 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
8541 rtx x, insn;
8542 struct iv_class *bl;
8543 int eliminate_p;
8544 rtx where;
8546 enum rtx_code code = GET_CODE (x);
8547 rtx reg = bl->biv->dest_reg;
8548 enum machine_mode mode = GET_MODE (reg);
8549 struct induction *v;
8550 rtx arg, tem;
8551 #ifdef HAVE_cc0
8552 rtx new;
8553 #endif
8554 int arg_operand;
8555 const char *fmt;
8556 int i, j;
8558 switch (code)
8560 case REG:
8561 /* If we haven't already been able to do something with this BIV,
8562 we can't eliminate it. */
8563 if (x == reg)
8564 return 0;
8565 return 1;
8567 case SET:
8568 /* If this sets the BIV, it is not a problem. */
8569 if (SET_DEST (x) == reg)
8570 return 1;
8572 /* If this is an insn that defines a giv, it is also ok because
8573 it will go away when the giv is reduced. */
8574 for (v = bl->giv; v; v = v->next_iv)
8575 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8576 return 1;
8578 #ifdef HAVE_cc0
8579 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8581 /* Can replace with any giv that was reduced and
8582 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8583 Require a constant for MULT_VAL, so we know it's nonzero.
8584 ??? We disable this optimization to avoid potential
8585 overflows. */
8587 for (v = bl->giv; v; v = v->next_iv)
8588 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
8589 && v->add_val == const0_rtx
8590 && ! v->ignore && ! v->maybe_dead && v->always_computable
8591 && v->mode == mode
8592 && 0)
8594 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8595 continue;
8597 if (! eliminate_p)
8598 return 1;
8600 /* If the giv has the opposite direction of change,
8601 then reverse the comparison. */
8602 if (INTVAL (v->mult_val) < 0)
8603 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8604 const0_rtx, v->new_reg);
8605 else
8606 new = v->new_reg;
8608 /* We can probably test that giv's reduced reg. */
8609 if (validate_change (insn, &SET_SRC (x), new, 0))
8610 return 1;
8613 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8614 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8615 Require a constant for MULT_VAL, so we know it's nonzero.
8616 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8617 overflow problem. */
8619 for (v = bl->giv; v; v = v->next_iv)
8620 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
8621 && ! v->ignore && ! v->maybe_dead && v->always_computable
8622 && v->mode == mode
8623 && (GET_CODE (v->add_val) == SYMBOL_REF
8624 || GET_CODE (v->add_val) == LABEL_REF
8625 || GET_CODE (v->add_val) == CONST
8626 || (GET_CODE (v->add_val) == REG
8627 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
8629 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8630 continue;
8632 if (! eliminate_p)
8633 return 1;
8635 /* If the giv has the opposite direction of change,
8636 then reverse the comparison. */
8637 if (INTVAL (v->mult_val) < 0)
8638 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8639 v->new_reg);
8640 else
8641 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8642 copy_rtx (v->add_val));
8644 /* Replace biv with the giv's reduced register. */
8645 update_reg_last_use (v->add_val, insn);
8646 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8647 return 1;
8649 /* Insn doesn't support that constant or invariant. Copy it
8650 into a register (it will be a loop invariant). */
8651 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8653 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
8654 where);
8656 /* Substitute the new register for its invariant value in
8657 the compare expression. */
8658 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8659 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8660 return 1;
8663 #endif
8664 break;
8666 case COMPARE:
8667 case EQ: case NE:
8668 case GT: case GE: case GTU: case GEU:
8669 case LT: case LE: case LTU: case LEU:
8670 /* See if either argument is the biv. */
8671 if (XEXP (x, 0) == reg)
8672 arg = XEXP (x, 1), arg_operand = 1;
8673 else if (XEXP (x, 1) == reg)
8674 arg = XEXP (x, 0), arg_operand = 0;
8675 else
8676 break;
8678 if (CONSTANT_P (arg))
8680 /* First try to replace with any giv that has constant positive
8681 mult_val and constant add_val. We might be able to support
8682 negative mult_val, but it seems complex to do it in general. */
8684 for (v = bl->giv; v; v = v->next_iv)
8685 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8686 && (GET_CODE (v->add_val) == SYMBOL_REF
8687 || GET_CODE (v->add_val) == LABEL_REF
8688 || GET_CODE (v->add_val) == CONST
8689 || (GET_CODE (v->add_val) == REG
8690 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
8691 && ! v->ignore && ! v->maybe_dead && v->always_computable
8692 && v->mode == mode)
8694 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8695 continue;
8697 if (! eliminate_p)
8698 return 1;
8700 /* Replace biv with the giv's reduced reg. */
8701 XEXP (x, 1-arg_operand) = v->new_reg;
8703 /* If all constants are actually constant integers and
8704 the derived constant can be directly placed in the COMPARE,
8705 do so. */
8706 if (GET_CODE (arg) == CONST_INT
8707 && GET_CODE (v->mult_val) == CONST_INT
8708 && GET_CODE (v->add_val) == CONST_INT
8709 && validate_change (insn, &XEXP (x, arg_operand),
8710 GEN_INT (INTVAL (arg)
8711 * INTVAL (v->mult_val)
8712 + INTVAL (v->add_val)), 0))
8713 return 1;
8715 /* Otherwise, load it into a register. */
8716 tem = gen_reg_rtx (mode);
8717 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8718 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
8719 return 1;
8721 /* If that failed, put back the change we made above. */
8722 XEXP (x, 1-arg_operand) = reg;
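/* Illustrative worked example: if the giv satisfies
   g = 4 * biv + 1 (mult_val 4, add_val 1), then the test biv < 7 is
   equivalent to g < 4*7 + 1, i.e. g < 29, which is the constant the
   GEN_INT computation above builds.  A positive mult_val is required
   so that the comparison keeps its direction.  */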
8725 /* Look for giv with positive constant mult_val and nonconst add_val.
8726 Insert insns to calculate new compare value.
8727 ??? Turn this off due to possible overflow. */
8729 for (v = bl->giv; v; v = v->next_iv)
8730 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8731 && ! v->ignore && ! v->maybe_dead && v->always_computable
8732 && v->mode == mode
8733 && 0)
8735 rtx tem;
8737 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8738 continue;
8740 if (! eliminate_p)
8741 return 1;
8743 tem = gen_reg_rtx (mode);
8745 /* Replace biv with giv's reduced register. */
8746 validate_change (insn, &XEXP (x, 1 - arg_operand),
8747 v->new_reg, 1);
8749 /* Compute value to compare against. */
8750 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8751 /* Use it in this insn. */
8752 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8753 if (apply_change_group ())
8754 return 1;
8757 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8759 if (invariant_p (arg) == 1)
8761 /* Look for giv with constant positive mult_val and nonconst
8762 add_val. Insert insns to compute new compare value.
8763 ??? Turn this off due to possible overflow. */
8765 for (v = bl->giv; v; v = v->next_iv)
8766 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8767 && ! v->ignore && ! v->maybe_dead && v->always_computable
8768 && v->mode == mode
8769 && 0)
8771 rtx tem;
8773 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8774 continue;
8776 if (! eliminate_p)
8777 return 1;
8779 tem = gen_reg_rtx (mode);
8781 /* Replace biv with giv's reduced register. */
8782 validate_change (insn, &XEXP (x, 1 - arg_operand),
8783 v->new_reg, 1);
8785 /* Compute value to compare against. */
8786 emit_iv_add_mult (arg, v->mult_val, v->add_val,
8787 tem, where);
8788 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8789 if (apply_change_group ())
8790 return 1;
8794 /* This code has problems. Basically, when deciding whether we
8795 will eliminate BL, we can't know whether a particular giv
8796 of ARG will be reduced. If it isn't going to be reduced,
8797 we can't eliminate BL. We can try forcing it to be reduced,
8798 but that can generate poor code.
8800 The problem is that the benefit of reducing TV, below, should
8801 be increased if BL can actually be eliminated, but this means
8802 we might have to do a topological sort of the order in which
8803 we try to process biv. It doesn't seem worthwhile to do
8804 this sort of thing now. */
8806 #if 0
8807 /* Otherwise the reg compared with had better be a biv. */
8808 if (GET_CODE (arg) != REG
8809 || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT)
8810 return 0;
8812 /* Look for a pair of givs, one for each biv,
8813 with identical coefficients. */
8814 for (v = bl->giv; v; v = v->next_iv)
8816 struct induction *tv;
8818 if (v->ignore || v->maybe_dead || v->mode != mode)
8819 continue;
8821 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
8822 if (! tv->ignore && ! tv->maybe_dead
8823 && rtx_equal_p (tv->mult_val, v->mult_val)
8824 && rtx_equal_p (tv->add_val, v->add_val)
8825 && tv->mode == mode)
8827 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8828 continue;
8830 if (! eliminate_p)
8831 return 1;
8833 /* Replace biv with its giv's reduced reg. */
8834 XEXP (x, 1-arg_operand) = v->new_reg;
8835 /* Replace other operand with the other giv's
8836 reduced reg. */
8837 XEXP (x, arg_operand) = tv->new_reg;
8838 return 1;
8841 #endif
8844 /* If we get here, the biv can't be eliminated. */
8845 return 0;
8847 case MEM:
8848 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8849 biv is used in it, since it will be replaced. */
8850 for (v = bl->giv; v; v = v->next_iv)
8851 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8852 return 1;
8853 break;
8855 default:
8856 break;
8859 /* See if any subexpression fails elimination. */
8860 fmt = GET_RTX_FORMAT (code);
8861 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8863 switch (fmt[i])
8865 case 'e':
8866 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
8867 eliminate_p, where))
8868 return 0;
8869 break;
8871 case 'E':
8872 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8873 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
8874 eliminate_p, where))
8875 return 0;
8876 break;
8880 return 1;
8883 /* Return nonzero if the last use of REG
8884 is in an insn following INSN in the same basic block. */
8886 static int
8887 last_use_this_basic_block (reg, insn)
8888 rtx reg;
8889 rtx insn;
8891 rtx n;
8892 for (n = insn;
8893 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8894 n = NEXT_INSN (n))
8896 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8897 return 1;
8899 return 0;
8902 /* Called via `note_stores' to record the initial value of a biv. Here we
8903 just record the location of the set and process it later. */
8905 static void
8906 record_initial (dest, set, data)
8907 rtx dest;
8908 rtx set;
8909 void *data ATTRIBUTE_UNUSED;
8911 struct iv_class *bl;
8913 if (GET_CODE (dest) != REG
8914 || REGNO (dest) >= max_reg_before_loop
8915 || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT)
8916 return;
8918 bl = reg_biv_class[REGNO (dest)];
8920 /* If this is the first set found, record it. */
8921 if (bl->init_insn == 0)
8923 bl->init_insn = note_insn;
8924 bl->init_set = set;
8928 /* If any of the registers in X are "old" and currently have a last use earlier
8929 than INSN, update them to have a last use of INSN. Their actual last use
8930 will be the previous insn but it will not have a valid uid_luid so we can't
8931 use it. */
8933 static void
8934 update_reg_last_use (x, insn)
8935 rtx x;
8936 rtx insn;
8938 /* Check for the case where INSN does not have a valid luid. In this case,
8939 there is no need to modify the regno_last_uid, as this can only happen
8940 when code is inserted after the loop_end to set a pseudo's final value,
8941 and hence this insn will never be the last use of x. */
8942 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8943 && INSN_UID (insn) < max_uid_for_loop
8944 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
8945 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8946 else
8948 register int i, j;
8949 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8950 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8952 if (fmt[i] == 'e')
8953 update_reg_last_use (XEXP (x, i), insn);
8954 else if (fmt[i] == 'E')
8955 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8956 update_reg_last_use (XVECEXP (x, i, j), insn);
8961 /* Given a jump insn JUMP, return the condition that will cause it to branch
8962 to its JUMP_LABEL. If the condition cannot be understood, or is an
8963 inequality floating-point comparison which needs to be reversed, 0 will
8964 be returned.
8966 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8967 insn used in locating the condition was found. If a replacement test
8968 of the condition is desired, it should be placed in front of that
8969 insn and we will be sure that the inputs are still valid.
8971 The condition will be returned in a canonical form to simplify testing by
8972 callers. Specifically:
8974 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8975 (2) Both operands will be machine operands; (cc0) will have been replaced.
8976 (3) If an operand is a constant, it will be the second operand.
8977 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8978 for GE, GEU, and LEU. */
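/* Illustrative worked example of canonicalization (4): a branch on
   (LE x 9), with 9 a CONST_INT, comes back as (LT x 10), and
   (GEU x 4) comes back as (GTU x 3); callers such as check_dbra_loop
   can therefore match only the strict forms.  */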
8980 rtx
8981 get_condition (jump, earliest)
8982 rtx jump;
8983 rtx *earliest;
8985 enum rtx_code code;
8986 rtx prev = jump;
8987 rtx set;
8988 rtx tem;
8989 rtx op0, op1;
8990 int reverse_code = 0;
8991 int did_reverse_condition = 0;
8992 enum machine_mode mode;
8994 /* If this is not a standard conditional jump, we can't parse it. */
8995 if (GET_CODE (jump) != JUMP_INSN
8996 || ! condjump_p (jump) || simplejump_p (jump))
8997 return 0;
8999 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
9000 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
9001 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
9002 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
9004 if (earliest)
9005 *earliest = jump;
9007 /* If this branches to JUMP_LABEL when the condition is false, reverse
9008 the condition. */
9009 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
9010 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
9011 code = reverse_condition (code), did_reverse_condition ^= 1;
9013 /* If we are comparing a register with zero, see if the register is set
9014 in the previous insn to a COMPARE or a comparison operation. Perform
9015 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9016 in cse.c */
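/* For instance (an illustrative sketch): given the sequence
       (set (reg:SI 100) (lt:SI (reg:SI 1) (reg:SI 2)))
       (set (pc) (if_then_else (ne (reg:SI 100) (const_int 0)) ...))
   the loop below walks back from the branch through the store-flag
   insn and recovers the condition (lt:SI (reg:SI 1) (reg:SI 2)).  */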
9018 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
9020 /* Set non-zero when we find something of interest. */
9021 rtx x = 0;
9023 #ifdef HAVE_cc0
9024 /* If comparison with cc0, import actual comparison from compare
9025 insn. */
9026 if (op0 == cc0_rtx)
9028 if ((prev = prev_nonnote_insn (prev)) == 0
9029 || GET_CODE (prev) != INSN
9030 || (set = single_set (prev)) == 0
9031 || SET_DEST (set) != cc0_rtx)
9032 return 0;
9034 op0 = SET_SRC (set);
9035 op1 = CONST0_RTX (GET_MODE (op0));
9036 if (earliest)
9037 *earliest = prev;
9039 #endif
9041 /* If this is a COMPARE, pick up the two things being compared. */
9042 if (GET_CODE (op0) == COMPARE)
9044 op1 = XEXP (op0, 1);
9045 op0 = XEXP (op0, 0);
9046 continue;
9048 else if (GET_CODE (op0) != REG)
9049 break;
9051 /* Go back to the previous insn. Stop if it is not an INSN. We also
9052 stop if it isn't a single set or if it has a REG_INC note because
9053 we don't want to bother dealing with it. */
9055 if ((prev = prev_nonnote_insn (prev)) == 0
9056 || GET_CODE (prev) != INSN
9057 || FIND_REG_INC_NOTE (prev, 0)
9058 || (set = single_set (prev)) == 0)
9059 break;
9061 /* If this is setting OP0, get what it sets it to if it looks
9062 relevant. */
9063 if (rtx_equal_p (SET_DEST (set), op0))
9065 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
9067 /* ??? We may not combine comparisons done in a CCmode with
9068 comparisons not done in a CCmode. This is to aid targets
9069 like Alpha that have an IEEE compliant EQ instruction, and
9070 a non-IEEE compliant BEQ instruction. The use of CCmode is
9071 actually artificial, simply to prevent the combination, but
9072 should not affect other platforms.
9074 However, we must allow VOIDmode comparisons to match either
9075 CCmode or non-CCmode comparison, because some ports have
9076 modeless comparisons inside branch patterns.
9078 ??? This mode check should perhaps look more like the mode check
9079 in simplify_comparison in combine. */
9081 if ((GET_CODE (SET_SRC (set)) == COMPARE
9082 || (((code == NE
9083 || (code == LT
9084 && GET_MODE_CLASS (inner_mode) == MODE_INT
9085 && (GET_MODE_BITSIZE (inner_mode)
9086 <= HOST_BITS_PER_WIDE_INT)
9087 && (STORE_FLAG_VALUE
9088 & ((HOST_WIDE_INT) 1
9089 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9090 #ifdef FLOAT_STORE_FLAG_VALUE
9091 || (code == LT
9092 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9093 && FLOAT_STORE_FLAG_VALUE < 0)
9094 #endif
9096 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9097 && (((GET_MODE_CLASS (mode) == MODE_CC)
9098 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9099 || mode == VOIDmode || inner_mode == VOIDmode))
9100 x = SET_SRC (set);
9101 else if (((code == EQ
9102 || (code == GE
9103 && (GET_MODE_BITSIZE (inner_mode)
9104 <= HOST_BITS_PER_WIDE_INT)
9105 && GET_MODE_CLASS (inner_mode) == MODE_INT
9106 && (STORE_FLAG_VALUE
9107 & ((HOST_WIDE_INT) 1
9108 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9109 #ifdef FLOAT_STORE_FLAG_VALUE
9110 || (code == GE
9111 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9112 && FLOAT_STORE_FLAG_VALUE < 0)
9113 #endif
9115 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9116 && (((GET_MODE_CLASS (mode) == MODE_CC)
9117 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9118 || mode == VOIDmode || inner_mode == VOIDmode))
9121 /* We might have reversed a LT to get a GE here. But this wasn't
9122 actually the comparison of data, so we don't flag that we
9123 have had to reverse the condition. */
9124 did_reverse_condition ^= 1;
9125 reverse_code = 1;
9126 x = SET_SRC (set);
9128 else
9129 break;
9132 else if (reg_set_p (op0, prev))
9133 /* If this sets OP0, but not directly, we have to give up. */
9134 break;
9136 if (x)
9138 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9139 code = GET_CODE (x);
9140 if (reverse_code)
9142 code = reverse_condition (code);
9143 did_reverse_condition ^= 1;
9144 reverse_code = 0;
9147 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9148 if (earliest)
9149 *earliest = prev;
9153 /* If constant is first, put it last. */
9154 if (CONSTANT_P (op0))
9155 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9157 /* If OP0 is the result of a comparison, we weren't able to find what
9158 was really being compared, so fail. */
9159 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9160 return 0;
9162 /* Canonicalize any ordered integer comparison that involves equality
9163 (LE, GE, LEU, GEU) into a strict one, provided we can do the
9164 computation in the relevant mode without overflow. */
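/* A worked example (illustrative, not from the original source): in
   QImode, max_val is 255, so (le x (const_int 127)) is left alone
   because 127 == max_val >> 1 and the rewritten constant 128 would
   overflow the signed range; likewise (geu x (const_int 0)) stays as
   is because uconst_val == 0.  */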
9166 if (GET_CODE (op1) == CONST_INT
9167 && GET_MODE (op0) != VOIDmode
9168 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9170 HOST_WIDE_INT const_val = INTVAL (op1);
9171 unsigned HOST_WIDE_INT uconst_val = const_val;
9172 unsigned HOST_WIDE_INT max_val
9173 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9175 switch (code)
9177 case LE:
9178 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9179 code = LT, op1 = GEN_INT (const_val + 1);
9180 break;
9182 /* When cross-compiling, const_val might be sign-extended from
9183 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9184 case GE:
9185 if ((HOST_WIDE_INT) (const_val & max_val)
9186 != (((HOST_WIDE_INT) 1
9187 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9188 code = GT, op1 = GEN_INT (const_val - 1);
9189 break;
9191 case LEU:
9192 if (uconst_val < max_val)
9193 code = LTU, op1 = GEN_INT (uconst_val + 1);
9194 break;
9196 case GEU:
9197 if (uconst_val != 0)
9198 code = GTU, op1 = GEN_INT (uconst_val - 1);
9199 break;
9201 default:
9202 break;
9206 /* If this was floating-point and we reversed anything other than an
9207 EQ or NE, return zero. */
9208 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
9209 && did_reverse_condition && code != NE && code != EQ
9210 && ! flag_fast_math
9211 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9212 return 0;
9214 #ifdef HAVE_cc0
9215 /* Never return CC0; return zero instead. */
9216 if (op0 == cc0_rtx)
9217 return 0;
9218 #endif
9220 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9223 /* Similar to above routine, except that we also put an invariant last
9224 unless both operands are invariants. */
9226 rtx
9227 get_condition_for_loop (x)
9228 rtx x;
9230 rtx comparison = get_condition (x, NULL_PTR);
9232 if (comparison == 0
9233 || ! invariant_p (XEXP (comparison, 0))
9234 || invariant_p (XEXP (comparison, 1)))
9235 return comparison;
9237 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9238 XEXP (comparison, 1), XEXP (comparison, 0));
9241 #ifdef HAVE_decrement_and_branch_on_count
9242 /* Instrument loop for insertion of a bct instruction. We distinguish between
9243 loops with compile-time bounds and those with run-time bounds.
9244 Information from loop_iterations() is used to compute compile-time bounds.
9245 Run-time bounds should use loop preconditioning, but are currently ignored. */
9248 static void
9249 insert_bct (loop_start, loop_end, loop_info)
9250 rtx loop_start, loop_end;
9251 struct loop_info *loop_info;
9253 int i;
9254 unsigned HOST_WIDE_INT n_iterations;
9256 int increment_direction, compare_direction;
9258 /* If the loop condition is <= or >=, the number of iterations
9259 is 1 more than the range of the bounds of the loop. */
9260 int add_iteration = 0;
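/* For example (an illustrative sketch): a loop equivalent to
   "for (i = 0; i <= 9; i++)" has bounds whose range is 9 - 0 = 9 but
   executes 10 times, so add_iteration would be 1 when the condition
   is <= or >=.  */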
9262 enum machine_mode loop_var_mode = word_mode;
9264 int loop_num = uid_loop_num [INSN_UID (loop_start)];
9266 /* It's impossible to instrument a completely unrolled loop. */
9267 if (loop_info->unroll_number == loop_info->n_iterations)
9268 return;
9270 /* Make sure that the count register is not in use. */
9271 if (loop_used_count_register [loop_num])
9273 if (loop_dump_stream)
9274 fprintf (loop_dump_stream,
9275 "insert_bct %d: BCT instrumentation failed: count register already in use\n",
9276 loop_num);
9277 return;
9280 /* Make sure that the function has no indirect jumps. */
9281 if (indirect_jump_in_function)
9283 if (loop_dump_stream)
9284 fprintf (loop_dump_stream,
9285 "insert_bct %d: BCT instrumentation failed: indirect jump in function\n",
9286 loop_num);
9287 return;
9290 /* Make sure that the last loop insn is a conditional jump. */
9291 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
9292 || ! condjump_p (PREV_INSN (loop_end))
9293 || simplejump_p (PREV_INSN (loop_end)))
9295 if (loop_dump_stream)
9296 fprintf (loop_dump_stream,
9297 "insert_bct %d: BCT instrumentation failed: invalid jump at loop end\n",
9298 loop_num);
9299 return;
9302 /* Make sure that the loop does not contain a function call
9303 (the count register might be altered by the called function). */
9304 if (loop_info->has_call)
9306 if (loop_dump_stream)
9307 fprintf (loop_dump_stream,
9308 "insert_bct %d: BCT instrumentation failed: function call in loop\n",
9309 loop_num);
9310 return;
9313 /* Make sure that the loop does not jump via a table.
9314 (the count register might be used to perform the branch on the table). */
9315 if (loop_info->has_tablejump)
9317 if (loop_dump_stream)
9318 fprintf (loop_dump_stream,
9319 "insert_bct %d: BCT instrumentation failed: computed branch in the loop\n",
9320 loop_num);
9321 return;
9324 /* Account for loop unrolling in instrumented iteration count. */
9325 if (loop_info->unroll_number > 1)
9326 n_iterations = loop_info->n_iterations / loop_info->unroll_number;
9327 else
9328 n_iterations = loop_info->n_iterations;
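/* E.g. (illustrative): a loop known to iterate 100 times that was
   unrolled 4 times now executes its unrolled body 25 times, so the
   BCT counter must be initialized to 100 / 4 = 25.  */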
9330 if (n_iterations != 0 && n_iterations < 3)
9332 /* Allow an enclosing outer loop to benefit if possible. */
9333 if (loop_dump_stream)
9334 fprintf (loop_dump_stream,
9335 "insert_bct %d: Too few iterations to benefit from BCT optimization\n",
9336 loop_num);
9337 return;
9340 /* Try to instrument the loop. */
9342 /* Handle the simpler case, where the bounds are known at compile time. */
9343 if (n_iterations > 0)
9345 /* Mark all enclosing loops as unable to use the count register. */
9346 for (i = loop_num; i != -1; i = loop_outer_loop[i])
9347 loop_used_count_register[i] = 1;
9348 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
9349 return;
9352 /* Handle the more complex case, where the bounds are NOT known
9353 at compile time. In this case we generate a run-time calculation
9354 of the number of iterations. */
9356 if (loop_info->iteration_var == 0)
9358 if (loop_dump_stream)
9359 fprintf (loop_dump_stream,
9360 "insert_bct %d: BCT Runtime Instrumentation failed: no loop iteration variable found\n",
9361 loop_num);
9362 return;
9365 if (GET_MODE_CLASS (GET_MODE (loop_info->iteration_var)) != MODE_INT
9366 || GET_MODE_SIZE (GET_MODE (loop_info->iteration_var)) != UNITS_PER_WORD)
9368 if (loop_dump_stream)
9369 fprintf (loop_dump_stream,
9370 "insert_bct %d: BCT Runtime Instrumentation failed: loop variable not integer\n",
9371 loop_num);
9372 return;
9375 /* With run-time bounds, if the comparison is of the form '!=' we give up. */
9376 if (loop_info->comparison_code == NE)
9378 if (loop_dump_stream)
9379 fprintf (loop_dump_stream,
9380 "insert_bct %d: BCT Runtime Instrumentation failed: runtime bounds with != comparison\n",
9381 loop_num);
9382 return;
9384 /* Use common loop preconditioning code instead. */
9385 #if 0
9386 else
9388 /* We rely on the existence of a run-time guard to ensure that the
9389 loop executes at least once. */
9390 rtx sequence;
9391 rtx iterations_num_reg;
9393 unsigned HOST_WIDE_INT increment_value_abs
9394 = INTVAL (increment) * increment_direction;
9396 /* Make sure that the increment is a power of two; otherwise an
9397 (expensive) divide is needed. */
9398 if (exact_log2 (increment_value_abs) == -1)
9400 if (loop_dump_stream)
9401 fprintf (loop_dump_stream,
9402 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
9403 return;
9406 /* compute the number of iterations */
9407 start_sequence ();
9409 rtx temp_reg;
9411 /* Again, the number of iterations is calculated by:
9413 ; num_iterations = (compare-val - initial-val + (increment - 1)
9415 ; + additional-iteration) / increment */
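/* A worked instance (illustrative, not from the original source):
   for a loop equivalent to "for (i = 0; i < 10; i += 2)" this gives
   (10 - 0 + (2 - 1) + 0) / 2 = 11 / 2 = 5 iterations; the division
   is implemented as a shift below, which is why the increment must
   be a power of two.  */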
9417 /* ??? Do we have to call copy_rtx here before passing rtx to
9418 expand_binop? */
9419 if (compare_direction > 0)
9421 /* <, <=: the loop variable is increasing. */
9422 temp_reg = expand_binop (loop_var_mode, sub_optab,
9423 comparison_value, initial_value,
9424 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9426 else
9428 temp_reg = expand_binop (loop_var_mode, sub_optab,
9429 initial_value, comparison_value,
9430 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9433 if (increment_value_abs - 1 + add_iteration != 0)
9434 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
9435 GEN_INT (increment_value_abs - 1
9436 + add_iteration),
9437 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9439 if (increment_value_abs != 1)
9440 iterations_num_reg = expand_binop (loop_var_mode, ashr_optab,
9441 temp_reg,
9442 GEN_INT (exact_log2 (increment_value_abs)),
9443 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9444 else
9445 iterations_num_reg = temp_reg;
9447 sequence = gen_sequence ();
9448 end_sequence ();
9449 emit_insn_before (sequence, loop_start);
9450 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
9453 return;
9454 #endif /* Complex case */
9457 /* Instrument loop by inserting a bct in it as follows:
9458 1. A new counter register is created.
9459 2. In the head of the loop the new register is initialized to the value
9460 passed in the loop_num_iterations parameter.
9461 3. At the end of the loop, a comparison of the register with 0 is generated.
9462 The created comparison follows the pattern defined for the
9463 decrement_and_branch_on_count insn, so this insn will be generated.
9464 4. The branch on the old variable is deleted. The compare must remain
9465 because it might be used elsewhere. If the loop variable or condition
9466 register is not used elsewhere, flow will eliminate it. */
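/* A hedged sketch of the resulting shape (the exact RTL is defined by
   the target's decrement_and_branch_on_count pattern, not shown in
   this file):
       (set (reg ctr) (const_int N))    ;; emitted before loop_start
       start_label:
       ... loop body ...
       ;; jump insn that decrements ctr and branches to start_label
       ;; while ctr is non-zero, replacing the deleted conditional
       ;; jump.  */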
9468 static void
9469 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
9470 rtx loop_start, loop_end;
9471 rtx loop_num_iterations;
9473 rtx counter_reg;
9474 rtx start_label;
9475 rtx sequence;
9477 if (HAVE_decrement_and_branch_on_count)
9479 if (loop_dump_stream)
9481 fputs ("instrument_bct: Inserting BCT (", loop_dump_stream);
9482 if (GET_CODE (loop_num_iterations) == CONST_INT)
9483 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
9484 INTVAL (loop_num_iterations));
9485 else
9486 fputs ("runtime", loop_dump_stream);
9487 fputs (" iterations)", loop_dump_stream);
9490 /* Discard original jump to continue loop. Original compare result
9491 may still be live, so it cannot be discarded explicitly. */
9492 delete_insn (PREV_INSN (loop_end));
9494 /* Insert the label which will delimit the start of the loop. */
9495 start_label = gen_label_rtx ();
9496 emit_label_after (start_label, loop_start);
9498 /* Insert initialization of the count register into the loop header. */
9499 start_sequence ();
9500 counter_reg = gen_reg_rtx (word_mode);
9501 emit_insn (gen_move_insn (counter_reg, loop_num_iterations));
9502 sequence = gen_sequence ();
9503 end_sequence ();
9504 emit_insn_before (sequence, loop_start);
9506 /* Insert new comparison on the count register instead of the
9507 old one, generating the needed BCT pattern (that will be
9508 later recognized by assembly generation phase). */
9509 emit_jump_insn_before (gen_decrement_and_branch_on_count (counter_reg,
9510 start_label),
9511 loop_end);
9512 LABEL_NUSES (start_label)++;
9516 #endif /* HAVE_decrement_and_branch_on_count */
9518 /* Scan the function and determine whether it has indirect (computed) jumps.
9520 This is taken mostly from flow.c; similar code exists elsewhere
9521 in the compiler. It may be useful to put this into rtlanal.c. */
9522 static int
9523 indirect_jump_in_function_p (start)
9524 rtx start;
9526 rtx insn;
9528 for (insn = start; insn; insn = NEXT_INSN (insn))
9529 if (computed_jump_p (insn))
9530 return 1;
9532 return 0;
9535 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9536 documentation for LOOP_MEMS for the definition of `appropriate'.
9537 This function is called from prescan_loop via for_each_rtx. */
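/* A note on the assumed for_each_rtx callback protocol (a summary,
   not from the original source): returning 0 continues the traversal,
   returning -1 continues it but skips the subexpressions of the
   current rtx, and any other nonzero value stops the walk entirely
   and is returned to the caller.  */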
9539 static int
9540 insert_loop_mem (mem, data)
9541 rtx *mem;
9542 void *data ATTRIBUTE_UNUSED;
9544 int i;
9545 rtx m = *mem;
9547 if (m == NULL_RTX)
9548 return 0;
9550 switch (GET_CODE (m))
9552 case MEM:
9553 break;
9555 case CONST_DOUBLE:
9556 /* We're not interested in the MEM associated with a
9557 CONST_DOUBLE, so there's no need to traverse into this. */
9558 return -1;
9560 default:
9561 /* This is not a MEM. */
9562 return 0;
9565 /* See if we've already seen this MEM. */
9566 for (i = 0; i < loop_mems_idx; ++i)
9567 if (rtx_equal_p (m, loop_mems[i].mem))
9569 if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
9570 /* The modes of the two memory accesses are different. If
9571 this happens, something tricky is going on, and we just
9572 don't optimize accesses to this MEM. */
9573 loop_mems[i].optimize = 0;
9575 return 0;
9578 /* Resize the array, if necessary. */
9579 if (loop_mems_idx == loop_mems_allocated)
9581 if (loop_mems_allocated != 0)
9582 loop_mems_allocated *= 2;
9583 else
9584 loop_mems_allocated = 32;
9586 loop_mems = (loop_mem_info*)
9587 xrealloc (loop_mems,
9588 loop_mems_allocated * sizeof (loop_mem_info));
9591 /* Actually insert the MEM. */
9592 loop_mems[loop_mems_idx].mem = m;
9593 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9594 because we can't put it in a register. We still store it in the
9595 table, though, so that if we see the same address later, but in a
9596 non-BLK mode, we'll not think we can optimize it at that point. */
9597 loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
9598 loop_mems[loop_mems_idx].reg = NULL_RTX;
9599 ++loop_mems_idx;
9601 return 0;
9604 /* Like load_mems, but also ensures that SET_IN_LOOP,
9605 MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
9606 values after load_mems. */
9608 static void
9609 load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
9610 insn_count)
9611 rtx scan_start;
9612 rtx end;
9613 rtx loop_top;
9614 rtx start;
9615 int *insn_count;
9617 int nregs = max_reg_num ();
9619 load_mems (scan_start, end, loop_top, start);
9621 /* Recalculate set_in_loop and friends since load_mems may have
9622 created new registers. */
9623 if (max_reg_num () > nregs)
9625 int i;
9626 int old_nregs;
9628 old_nregs = nregs;
9629 nregs = max_reg_num ();
9631 if ((unsigned) nregs > set_in_loop->num_elements)
9633 /* Grow all the arrays. */
9634 VARRAY_GROW (set_in_loop, nregs);
9635 VARRAY_GROW (n_times_set, nregs);
9636 VARRAY_GROW (may_not_optimize, nregs);
9637 VARRAY_GROW (reg_single_usage, nregs);
9639 /* Clear the arrays. */
9640 bzero ((char *) &set_in_loop->data, nregs * sizeof (int));
9641 bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
9642 bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));
9644 count_loop_regs_set (loop_top ? loop_top : start, end,
9645 may_not_optimize, reg_single_usage,
9646 insn_count, nregs);
9648 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9650 VARRAY_CHAR (may_not_optimize, i) = 1;
9651 VARRAY_INT (set_in_loop, i) = 1;
9654 #ifdef AVOID_CCMODE_COPIES
9655 /* Don't try to move insns which set CC registers if we should not
9656 create CCmode register copies. */
9657 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9658 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9659 VARRAY_CHAR (may_not_optimize, i) = 1;
9660 #endif
9662 /* Set n_times_set for the new registers. */
9663 bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs),
9664 (char *) (&n_times_set->data.i[0] + old_nregs),
9665 (nregs - old_nregs) * sizeof (int));
9669 /* Move MEMs into registers for the duration of the loop. SCAN_START
9670 is the first instruction in the loop (as it is executed). The
9671 other parameters are as for next_insn_in_loop. */
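/* Conceptually (an illustrative sketch, not from the original
   source), for a loop-invariant address p this transforms
       loop { ... use *p ...; *p = expr; }
   into
       reg = *p; loop { ... use reg ...; reg = expr; } *p = reg;
   where the final store is emitted only if the MEM is written
   within the loop.  */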
9673 static void
9674 load_mems (scan_start, end, loop_top, start)
9675 rtx scan_start;
9676 rtx end;
9677 rtx loop_top;
9678 rtx start;
9680 int maybe_never = 0;
9681 int i;
9682 rtx p;
9683 rtx label = NULL_RTX;
9684 rtx end_label = NULL_RTX;
9686 if (loop_mems_idx > 0)
9688 /* Nonzero if the next instruction may never be executed. */
9689 int next_maybe_never = 0;
9691 /* Check to see if it's possible that some instructions in the
9692 loop are never executed. */
9693 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
9694 p != NULL_RTX && !maybe_never;
9695 p = next_insn_in_loop (p, scan_start, end, loop_top))
9697 if (GET_CODE (p) == CODE_LABEL)
9698 maybe_never = 1;
9699 else if (GET_CODE (p) == JUMP_INSN
9700 /* If we enter the loop in the middle, and scan
9701 around to the beginning, don't set maybe_never
9702 for that. This must be an unconditional jump,
9703 otherwise the code at the top of the loop might
9704 never be executed. Unconditional jumps are
9705 followed by a barrier and then the loop end. */
9706 && ! (GET_CODE (p) == JUMP_INSN
9707 && JUMP_LABEL (p) == loop_top
9708 && NEXT_INSN (NEXT_INSN (p)) == end
9709 && simplejump_p (p)))
9711 if (!condjump_p (p))
9712 /* Something complicated. */
9713 maybe_never = 1;
9714 else
9715 /* If there are any more instructions in the loop, they
9716 might not be reached. */
9717 next_maybe_never = 1;
9719 else if (next_maybe_never)
9720 maybe_never = 1;
9723 /* Actually move the MEMs. */
9724 for (i = 0; i < loop_mems_idx; ++i)
9726 int written = 0;
9727 rtx reg;
9728 rtx mem = loop_mems[i].mem;
9729 rtx mem_list_entry;
9731 if (MEM_VOLATILE_P (mem)
9732 || invariant_p (XEXP (mem, 0)) != 1)
9733 /* There's no telling whether or not MEM is modified. */
9734 loop_mems[i].optimize = 0;
9736 /* Go through the MEMs written to in the loop to see if this
9737 one is aliased by one of them. */
9738 mem_list_entry = loop_store_mems;
9739 while (mem_list_entry)
9741 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9742 written = 1;
9743 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9744 mem, rtx_varies_p))
9746 /* MEM is indeed aliased by this store. */
9747 loop_mems[i].optimize = 0;
9748 break;
9750 mem_list_entry = XEXP (mem_list_entry, 1);
9753 /* If this MEM is written to, we must be sure that there
9754 are no reads from another MEM that aliases this one. */
9755 if (loop_mems[i].optimize && written)
9757 int j;
9759 for (j = 0; j < loop_mems_idx; ++j)
9761 if (j == i)
9762 continue;
9763 else if (true_dependence (mem,
9764 VOIDmode,
9765 loop_mems[j].mem,
9766 rtx_varies_p))
9768 /* It's not safe to hoist loop_mems[i] out of
9769 the loop because writes to it might not be
9770 seen by reads from loop_mems[j]. */
9771 loop_mems[i].optimize = 0;
9772 break;
9777 if (maybe_never && may_trap_p (mem))
9778 /* We can't access the MEM outside the loop; it might
9779 cause a trap that wouldn't have happened otherwise. */
9780 loop_mems[i].optimize = 0;
9782 if (!loop_mems[i].optimize)
9783 /* We thought we were going to lift this MEM out of the
9784 loop, but later discovered that we could not. */
9785 continue;
9787 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9788 order to keep scan_loop from moving stores to this MEM
9789 out of the loop just because this REG is neither a
9790 user-variable nor used in the loop test. */
9791 reg = gen_reg_rtx (GET_MODE (mem));
9792 REG_USERVAR_P (reg) = 1;
9793 loop_mems[i].reg = reg;
9795 /* Now, replace all references to the MEM with the
9796 corresponding pseudos. */
9797 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
9798 p != NULL_RTX;
9799 p = next_insn_in_loop (p, scan_start, end, loop_top))
9801 rtx_and_int ri;
9802 ri.r = p;
9803 ri.i = i;
9804 for_each_rtx (&p, replace_loop_mem, &ri);
9807 if (!apply_change_group ())
9808 /* We couldn't replace all occurrences of the MEM. */
9809 loop_mems[i].optimize = 0;
9810 else
9812 rtx set;
9814 /* Load the memory immediately before START, which is
9815 the NOTE_LOOP_BEG. */
9816 set = gen_move_insn (reg, mem);
9817 emit_insn_before (set, start);
9819 if (written)
9821 if (label == NULL_RTX)
9823 /* We must compute the former
9824 right-after-the-end label before we insert
9825 the new one. */
9826 end_label = next_label (end);
9827 label = gen_label_rtx ();
9828 emit_label_after (label, end);
9831 /* Store the memory immediately after END, which is
9832 the NOTE_LOOP_END. */
9833 set = gen_move_insn (copy_rtx (mem), reg);
9834 emit_insn_after (set, label);
9837 if (loop_dump_stream)
9839 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9840 REGNO (reg), (written ? "r/w" : "r/o"));
9841 print_rtl (loop_dump_stream, mem);
9842 fputc ('\n', loop_dump_stream);
9848 if (label != NULL_RTX)
9850 /* Now, we need to replace all references to the previous exit
9851 label with the new one. */
9852 rtx_pair rr;
9853 rr.r1 = end_label;
9854 rr.r2 = label;
9856 for (p = start; p != end; p = NEXT_INSN (p))
9858 for_each_rtx (&p, replace_label, &rr);
9860 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9861 field. This is not handled by for_each_rtx because it doesn't
9862 handle unprinted ('0') fields. We need to update JUMP_LABEL
9863 because the immediately following unroll pass will use it.
9864 replace_label would not work anyway, because that only handles
9865 LABEL_REFs. */
9866 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9867 JUMP_LABEL (p) = label;
9872 /* Replace MEM with its associated pseudo register. This function is
9873 called from load_mems via for_each_rtx. DATA is actually an
9874 rtx_and_int * describing the instruction currently being scanned
9875 and the MEM we are currently replacing. */
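/* Note on the assumed API: validate_change is called below with
   IN_GROUP == 1, so each replacement is merely queued; the changes
   are committed atomically by the apply_change_group call in
   load_mems, which verifies that every modified insn still matches
   its pattern.  */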
9877 static int
9878 replace_loop_mem (mem, data)
9879 rtx *mem;
9880 void *data;
9882 rtx_and_int *ri;
9883 rtx insn;
9884 int i;
9885 rtx m = *mem;
9887 if (m == NULL_RTX)
9888 return 0;
9890 switch (GET_CODE (m))
9892 case MEM:
9893 break;
9895 case CONST_DOUBLE:
9896 /* We're not interested in the MEM associated with a
9897 CONST_DOUBLE, so there's no need to traverse into one. */
9898 return -1;
9900 default:
9901 /* This is not a MEM. */
9902 return 0;
9905 ri = (rtx_and_int*) data;
9906 i = ri->i;
9908 if (!rtx_equal_p (loop_mems[i].mem, m))
9909 /* This is not the MEM we are currently replacing. */
9910 return 0;
9912 insn = ri->r;
9914 /* Actually replace the MEM. */
9915 validate_change (insn, mem, loop_mems[i].reg, 1);
9917 return 0;
9920 /* Replace occurrences of the old exit label for the loop with the new
9921 one. DATA is an rtx_pair containing the old and new labels,
9922 respectively. */
9924 static int
9925 replace_label (x, data)
9926 rtx *x;
9927 void *data;
9929 rtx l = *x;
9930 rtx old_label = ((rtx_pair*) data)->r1;
9931 rtx new_label = ((rtx_pair*) data)->r2;
9933 if (l == NULL_RTX)
9934 return 0;
9936 if (GET_CODE (l) != LABEL_REF)
9937 return 0;
9939 if (XEXP (l, 0) != old_label)
9940 return 0;
9942 XEXP (l, 0) = new_label;
9943 ++LABEL_NUSES (new_label);
9944 --LABEL_NUSES (old_label);
9946 return 0;