gcc/loop.c
/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
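
/* As an illustrative sketch (not taken from these sources), a loop like

       for (i = 0; i < n; i++)
         a[i] = x * y;

   is conceptually rewritten by this pass into

       t = x * y;                   <- invariant hoisted before the loop
       for (i = 0; i < n; i++)
         a[i] = t;

   while strength reduction replaces the multiplication hidden in the
   address computation a + i * 4 with a pointer that is bumped by 4 on
   each iteration.  */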
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"
/* Information about the loop being processed used to compute
   the number of loop iterations for loop unrolling and doloop
   optimization.  */
static struct loop_info this_loop_info;
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* Likewise for the continue insn.  */
static rtx *loop_number_loop_cont;

/* The first code_label that is reached in every loop iteration.
   0 when not computed yet, initially const0_rtx if a jump couldn't be
   followed.
   Also set to 0 when there is no such label before the NOTE_INSN_LOOP_CONT
   of this loop, or in verify_dominator, if a jump couldn't be followed.  */
static rtx *loop_number_cont_dominator;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAVE_decrement_and_branch_on_count
/* Records whether the count register is in use by an inner loop.  */

int *loop_used_count_register;
#endif  /* HAVE_decrement_and_branch_on_count */

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */
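/* For instance (an illustrative reading of the encoding above): a reg
   set twice in the loop that becomes a candidate known equal to a
   constant goes from 2 to -2 here; if the move later fails, the count
   is restored from n_times_set below.  */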
static varray_type set_in_loop;

/* Original value of set_in_loop; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */
static varray_type n_times_set;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */
static varray_type may_not_optimize;

/* Contains the insn in which a register was used if it was used
   exactly once; contains const0_rtx if it was used more than once.  */
static varray_type reg_single_usage;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */
static char *moved_once;

/* List of MEMs that are stored in this loop.  */
static rtx loop_store_mems;

/* The insn where the first of these was found.  */
static rtx first_loop_store_insn;
typedef struct loop_mem_info {
  rtx mem;       /* The MEM itself.  */
  rtx reg;       /* Corresponding pseudo, if any.  */
  int optimize;  /* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */
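/* For example (illustrative): a read of *p where p is loop-invariant
   and no store in the loop can alias *p would qualify; load_mems may
   then replace such a MEM with the pseudo recorded in `reg' above.  */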
static loop_mem_info *loop_mems;

/* The index of the next available slot in LOOP_MEMS.  */
static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMS.  */
static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current
   loop.  This happens if the loop contains a call (in which case
   `loop_info->has_call' will also be set) or if we store into more
   than NUM_STORES MEMs.  */
static int unknown_address_altered;

/* The above doesn't count any readonly memory locations that are stored.
   This does.  */
static int unknown_constant_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */
static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */
extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn.  */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  int regno;                    /* The register it sets.  */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable.  */
  unsigned int force : 1;       /* 1 means MUST move this insn.  */
  unsigned int global : 1;      /* 1 means reg is live outside this loop.  */
                                /* If PARTIAL is 1, GLOBAL means something
                                   different: that the reg is live outside
                                   the range from where it is set to the
                                   following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this.  */

  unsigned int partial : 1;     /* 1 means this reg is used for
                                   zero-extending.  In particular, moving it
                                   does not make it invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first : 1; /* Same as above, if this is necessary
                                       for the first insn of a consecutive
                                       sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;   /* Nonzero means it is a mode for a low part
                                   that we should avoid changing when
                                   clearing the rest of the reg.  */
  struct movable *match;        /* First entry for same value.  */
  struct movable *forces;       /* An insn that must be moved if this is.  */
  struct movable *next;
};

static struct movable *the_movables;
FILE *loop_dump_stream;

/* Forward declarations.  */

static void verify_dominator PROTO((int));
static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx, struct loop_info *));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_one_set PROTO((rtx, rtx, varray_type, rtx *));
static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
                                       int *, int));
static void note_addr_stored PROTO((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PROTO((rtx, rtx, void *));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx,
                                   struct loop_info *, rtx, int, int));
static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx,
                              rtx *, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx,
                                     unsigned HOST_WIDE_INT));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx,
                              int, enum g_types, int, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx,
                                      rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int,
                                        int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx, struct loop_info *));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
struct recombine_givs_stats;
static int find_life_end PROTO((rtx, struct recombine_givs_stats *,
                                rtx, rtx));
static void recombine_givs PROTO((struct iv_class *, rtx, rtx, int));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int,
                                      int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int,
                                        rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx, void *));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
                                                       rtx, int *));
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));
typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)       \
  (INSN_UID (INSN) < max_uid_for_loop           \
   && INSN_LUID (INSN) >= INSN_LUID (START)     \
   && INSN_LUID (INSN) <= INSN_LUID (END))
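
/* For instance, scan_loop below uses
   INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end)
   to test whether a jump target lies within the current loop; the
   max_uid_for_loop check guards against insns created after luids
   were assigned.  */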
#ifdef HAVE_decrement_and_branch_on_count
/* Test whether BCT is applicable and safe.  */
static void insert_bct PROTO((rtx, rtx, struct loop_info *));

/* Auxiliary function that inserts the BCT pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

static int compute_luids PROTO((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PROTO((struct induction *,
                                                   struct induction *, rtx));

/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;
#if 0
static int shift_cost;
static int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */

static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p, bct_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p, bct_p;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_cont = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_cont_dominator = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

#ifdef HAVE_decrement_and_branch_on_count
  /* Allocate for BCT optimization.  */
  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
#endif  /* HAVE_decrement_and_branch_on_count */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it might
     have rearranged code afterwards, so we need to recompute the luids
     now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are
     done before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 loop_number_loop_cont[i], unroll_p, bct_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (insn, start, end, loop_top)
     rtx insn;
     rtx start;
     rtx end;
     rtx loop_top;
{
  insn = NEXT_INSN (insn);

  if (insn == end)
    {
      if (loop_top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop_top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.
   LOOP_CONT is the NOTE_INSN_LOOP_CONT.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, loop_cont, unroll_p, bct_p)
     rtx loop_start, end, loop_cont;
     int unroll_p, bct_p;
{
  register int i;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;
  struct loop_info *loop_info = &this_loop_info;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
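
  /* An illustrative sketch of such a rotated loop in the insn stream:

         NOTE_INSN_LOOP_BEG
         jump L2                 <- loop_entry_jump
     L1: ... loop body ...       <- loop_top
     L2: ... exit test ...       <- scan_start
         conditional jump L1
         NOTE_INSN_LOOP_END

     The entry jump goes straight to the exit test, so the body runs
     only if the test passes at least once.  */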
  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end, loop_info);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  Set VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_and_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
  VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage,
                       &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (set_in_loop, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

  bcopy ((char *) &set_in_loop->data,
         (char *) &n_times_set->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_info->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_info->cont));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set set_in_loop negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;
      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a
             REG_EQUIV note is given or if a REG_EQUAL note with a
             constant operand is specified, use it as the source and
             mark that we should move this insn by calling
             emit_move_insn rather than duplicating the insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
             note is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }
          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute
             the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          else if (/* The register is used in basic blocks other
                      than the one where it is set (meaning that
                      something after this point in the loop might
                      depend on its value before the set).  */
                   ! reg_in_basic_block_p (p, SET_DEST (set))
                   /* And the set is not guaranteed to be executed once
                      the loop starts, or the value before the set is
                      needed before the set occurs...

                      ??? Note we have quadratic behaviour here, mitigated
                      by the fact that the previous test will often fail for
                      large loops.  Rather than re-scanning the entire loop
                      each time for register usage, we should build tables
                      of the register usage and use them here instead.  */
                   && (maybe_never
                       || loop_reg_used_before_p (set, p, loop_start,
                                                  scan_start, end)))
            /* It is unsafe to move the set.

               This code used to consider it OK to move a set of a variable
               which was not created by the user and not used in an exit
               test.  That behavior is incorrect and was removed.  */
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (VARRAY_INT (set_in_loop,
                                   REGNO (SET_DEST (set))) == 1
                       || (tem1
                           = consec_sets_invariant_p
                             (SET_DEST (set),
                              VARRAY_INT (set_in_loop,
                                          REGNO (SET_DEST (set))),
                              p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two
                 insns can be combined as long as they are both in the
                 loop, but we move one of them outside the loop.  For
                 large loops, this can lose.  The most common case of
                 this is the address of a function being called.

                 Therefore, if this register is marked as being used
                 exactly once if we are in a loop with calls (a "large
                 loop"), see if we can replace the usage of this
                 register with the source of this SET.  If we can,
                 delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (loop_info->has_call
                  && VARRAY_RTX (reg_single_usage, regno) != 0
                  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
                  && VARRAY_INT (set_in_loop, regno) == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && (REGNO (SET_SRC (set))
                                 < FIRST_PSEUDO_REGISTER))))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           VARRAY_RTX
                                           (reg_single_usage, regno))
                  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage,
                                                         regno))
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           VARRAY_RTX
                                           (reg_single_usage, regno)))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy
                     the new source, so that we don't get rtx sharing
                     between the SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
                    = replace_rtx (REG_NOTES (VARRAY_RTX
                                              (reg_single_usage, regno)),
                                   SET_DEST (set),
                                   copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  VARRAY_INT (set_in_loop, regno) = 0;
                  continue;
                }
              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = VARRAY_INT (set_in_loop,
                                      REGNO (SET_DEST (set))) - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || (uid_luid[REGNO_FIRST_UID (regno)]
                               < INSN_LUID (loop_start)));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = VARRAY_INT (n_times_set, regno);
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;
              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and
                     possibly m->set_src to correspond to the effects of
                     all the insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv
                    = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (VARRAY_INT (set_in_loop, regno) == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                       while (1)
                         while (s != t)
                           {
                             if (foo ()) x = *s;
                             use (x);
                           }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  VARRAY_INT (set_in_loop, regno) = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again
             known to be executed: logically, the loop begins here even
             though the exit code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads leads, when it
     dies, right into another conditionally movable insn.  If so, record
     that the second insn "forces" the first one, since the second can
     be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in set_in_loop for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (set_in_loop, i) < 0)
      VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
                                       loop_start, &insn_count);

  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
                       insn_count, loop_start, end,
                       loop_info, loop_cont, unroll_p, bct_p);
    }

  VARRAY_FREE (reg_single_usage);
  VARRAY_FREE (set_in_loop);
  VARRAY_FREE (n_times_set);
  VARRAY_FREE (may_not_optimize);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this,
                                output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn;
               insn = NEXT_INSN (insn))
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}
/* For each movable insn, see if the reg that it loads leads, when it
   dies, right into another conditionally movable insn.  If so, record
   that the second insn "forces" the first one, since the second can
   be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1
        && !m->partial)
      {
        register struct movable *m1;
        int regno = m->regno;

        bzero (matched_regs, nregs);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0
              && VARRAY_INT (n_times_set, m1->regno) == 1
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && (GET_MODE_CLASS (GET_MODE (m1->set_dest))
                            == MODE_INT)
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */
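
  /* Illustrative sketch: two partial regs r1 and r2, each zero-extended
     from the same narrow mode, can share one cleared register when the
     luid range [first use, last use] of r1 ends before that of r2
     begins, which is what the overlap test below checks.  */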
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            register struct movable *m1;
            int first = uid_luid[REGNO_FIRST_UID (m->regno)];
            int last = uid_luid[REGNO_LAST_UID (m->regno)];

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
                       || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap:
            ;
          }
    }
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}
1570 /* Return 1 if X and Y are identical-looking rtx's.
1571 This is the Lisp function EQUAL for rtx arguments.
1573 If two registers are matching movables or a movable register and an
1574 equivalent constant, consider them equal. */
1576 static int
1577 rtx_equal_for_loop_p (x, y, movables)
1578 rtx x, y;
1579 struct movable *movables;
1581 register int i;
1582 register int j;
1583 register struct movable *m;
1584 register enum rtx_code code;
1585 register const char *fmt;
1587 if (x == y)
1588 return 1;
1589 if (x == 0 || y == 0)
1590 return 0;
1592 code = GET_CODE (x);
1594 /* If we have a register and a constant, they may sometimes be
1595 equal. */
1596 if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2
1597 && CONSTANT_P (y))
1599 for (m = movables; m; m = m->next)
1600 if (m->move_insn && m->regno == REGNO (x)
1601 && rtx_equal_p (m->set_src, y))
1602 return 1;
1604 else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2
1605 && CONSTANT_P (x))
1607 for (m = movables; m; m = m->next)
1608 if (m->move_insn && m->regno == REGNO (y)
1609 && rtx_equal_p (m->set_src, x))
1610 return 1;
1613 /* Otherwise, rtx's of different codes cannot be equal. */
1614 if (code != GET_CODE (y))
1615 return 0;
1617 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1618 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1620 if (GET_MODE (x) != GET_MODE (y))
1621 return 0;
1623 /* These three types of rtx's can be compared nonrecursively. */
1624 if (code == REG)
1625 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1627 if (code == LABEL_REF)
1628 return XEXP (x, 0) == XEXP (y, 0);
1629 if (code == SYMBOL_REF)
1630 return XSTR (x, 0) == XSTR (y, 0);
1632 /* Compare the elements. If any pair of corresponding elements
1633 fail to match, return 0 for the whole things. */
1635 fmt = GET_RTX_FORMAT (code);
1636 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1638 switch (fmt[i])
1640 case 'w':
1641 if (XWINT (x, i) != XWINT (y, i))
1642 return 0;
1643 break;
1645 case 'i':
1646 if (XINT (x, i) != XINT (y, i))
1647 return 0;
1648 break;
1650 case 'E':
1651 /* Two vectors must have the same length. */
1652 if (XVECLEN (x, i) != XVECLEN (y, i))
1653 return 0;
1655 /* And the corresponding elements must match. */
1656 for (j = 0; j < XVECLEN (x, i); j++)
1657 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1658 return 0;
1659 break;
1661 case 'e':
1662 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1663 return 0;
1664 break;
1666 case 's':
1667 if (strcmp (XSTR (x, i), XSTR (y, i)))
1668 return 0;
1669 break;
1671 case 'u':
1672 /* These are just backpointers, so they don't matter. */
1673 break;
1675 case '0':
1676 break;
1678 /* It is believed that rtx's at this level will never
1679 contain anything but integers and other rtx's,
1680 except within LABEL_REFs and SYMBOL_REFs. */
1681 default:
1682 abort ();
1685 return 1;
1688 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1689 insns in INSNS which use the reference. */
1691 static void
1692 add_label_notes (x, insns)
1693 rtx x;
1694 rtx insns;
1696 enum rtx_code code = GET_CODE (x);
1697 int i, j;
1698 const char *fmt;
1699 rtx insn;
1701 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1703 /* This code used to ignore labels that referred to dispatch tables to
1704 avoid flow generating (slightly) worse code.
1706 We no longer ignore such label references (see LABEL_REF handling in
1707 mark_jump_label for additional information). */
1708 for (insn = insns; insn; insn = NEXT_INSN (insn))
1709 if (reg_mentioned_p (XEXP (x, 0), insn))
1710 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1711 REG_NOTES (insn));
1714 fmt = GET_RTX_FORMAT (code);
1715 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1717 if (fmt[i] == 'e')
1718 add_label_notes (XEXP (x, i), insns);
1719 else if (fmt[i] == 'E')
1720 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1721 add_label_notes (XVECEXP (x, i, j), insns);
1725 /* Scan MOVABLES, and move the insns that deserve to be moved.
1726 If two matching movables are combined, replace one reg with the
1727 other throughout. */
1729 static void
1730 move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1731 struct movable *movables;
1732 int threshold;
1733 int insn_count;
1734 rtx loop_start;
1735 rtx end;
1736 int nregs;
1738 rtx new_start = 0;
1739 register struct movable *m;
1740 register rtx p;
1741 /* Map of pseudo-register replacements to handle combining
1742 when we move several insns that load the same value
1743 into different pseudo-registers. */
1744 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1745 char *already_moved = (char *) alloca (nregs);
1747 bzero (already_moved, nregs);
1748 bzero ((char *) reg_map, nregs * sizeof (rtx));
1750 num_movables = 0;
1752 for (m = movables; m; m = m->next)
1754 /* Describe this movable insn. */
1756 if (loop_dump_stream)
1758 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1759 INSN_UID (m->insn), m->regno, m->lifetime);
1760 if (m->consec > 0)
1761 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1762 if (m->cond)
1763 fprintf (loop_dump_stream, "cond ");
1764 if (m->force)
1765 fprintf (loop_dump_stream, "force ");
1766 if (m->global)
1767 fprintf (loop_dump_stream, "global ");
1768 if (m->done)
1769 fprintf (loop_dump_stream, "done ");
1770 if (m->move_insn)
1771 fprintf (loop_dump_stream, "move-insn ");
1772 if (m->match)
1773 fprintf (loop_dump_stream, "matches %d ",
1774 INSN_UID (m->match->insn));
1775 if (m->forces)
1776 fprintf (loop_dump_stream, "forces %d ",
1777 INSN_UID (m->forces->insn));
1780 /* Count movables. Value used in heuristics in strength_reduce. */
1781 num_movables++;
1783 /* Ignore the insn if it's already done (it matched something else).
1784 Otherwise, see if it is now safe to move. */
1786 if (!m->done
1787 && (! m->cond
1788 || (1 == invariant_p (m->set_src)
1789 && (m->dependencies == 0
1790 || 1 == invariant_p (m->dependencies))
1791 && (m->consec == 0
1792 || 1 == consec_sets_invariant_p (m->set_dest,
1793 m->consec + 1,
1794 m->insn))))
1795 && (! m->forces || m->forces->done))
1797 register int regno;
1798 register rtx p;
1799 int savings = m->savings;
1801 /* We have an insn that is safe to move.
1802 Compute its desirability. */
1804 p = m->insn;
1805 regno = m->regno;
1807 if (loop_dump_stream)
1808 fprintf (loop_dump_stream, "savings %d ", savings);
1810 if (moved_once[regno] && loop_dump_stream)
1811 fprintf (loop_dump_stream, "halved since already moved ");
1813 /* An insn MUST be moved if we already moved something else
1814 which is safe only if this one is moved too: that is,
1815 if already_moved[REGNO] is nonzero. */
1817 /* An insn is desirable to move if the new lifetime of the
1818 register is no more than THRESHOLD times the old lifetime.
1819 If it's not desirable, it means the loop is so big
1820 that moving won't speed things up much,
1821 and it is liable to make register usage worse. */
1823 /* It is also desirable to move if it can be moved at no
1824 extra cost because something else was already moved. */
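/* A worked instance of the test below, with invented numbers:
   threshold == 6, savings == 2, lifetime == 10 and insn_count == 100
   give 6 * 2 * 10 == 120 >= 100, so the insn is worth moving.  Had
   this reg already been moved out of another loop (moved_once), the
   right-hand side would double to 200 and the insn would fail on
   desirability alone.  */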
1826 if (already_moved[regno]
1827 || flag_move_all_movables
1828 || (threshold * savings * m->lifetime) >=
1829 (moved_once[regno] ? insn_count * 2 : insn_count)
1830 || (m->forces && m->forces->done
1831 && VARRAY_INT (n_times_set, m->forces->regno) == 1))
1833 int count;
1834 register struct movable *m1;
1835 rtx first = NULL_RTX;
1837 /* Now move the insns that set the reg. */
1839 if (m->partial && m->match)
1841 rtx newpat, i1;
1842 rtx r1, r2;
1843 /* Find the end of this chain of matching regs.
1844 Thus, we load each reg in the chain from that one reg.
1845 And that reg is loaded with 0 directly,
1846 since it has ->match == 0. */
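/* E.g. with a match chain A -> B -> C, where C->match == 0: moving
   A emits a copy of C's reg into A's reg before the loop, and only
   C itself gets loaded with 0 directly.  */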
1847 for (m1 = m; m1->match; m1 = m1->match);
1848 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1849 SET_DEST (PATTERN (m1->insn)));
1850 i1 = emit_insn_before (newpat, loop_start);
1852 /* Mark the moved, invariant reg as being allowed to
1853 share a hard reg with the other matching invariant. */
1854 REG_NOTES (i1) = REG_NOTES (m->insn);
1855 r1 = SET_DEST (PATTERN (m->insn));
1856 r2 = SET_DEST (PATTERN (m1->insn));
1857 regs_may_share
1858 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1859 gen_rtx_EXPR_LIST (VOIDmode, r2,
1860 regs_may_share));
1861 delete_insn (m->insn);
1863 if (new_start == 0)
1864 new_start = i1;
1866 if (loop_dump_stream)
1867 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1869 /* If we are to re-generate the item being moved with a
1870 new move insn, first delete what we have and then emit
1871 the move insn before the loop. */
1872 else if (m->move_insn)
1874 rtx i1, temp;
1876 for (count = m->consec; count >= 0; count--)
1878 /* If this is the first insn of a library call sequence,
1879 skip to the end. */
1880 if (GET_CODE (p) != NOTE
1881 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1882 p = XEXP (temp, 0);
1884 /* If this is the last insn of a libcall sequence, then
1885 delete every insn in the sequence except the last.
1886 The last insn is handled in the normal manner. */
1887 if (GET_CODE (p) != NOTE
1888 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1890 temp = XEXP (temp, 0);
1891 while (temp != p)
1892 temp = delete_insn (temp);
1895 temp = p;
1896 p = delete_insn (p);
1898 /* simplify_giv_expr expects that it can walk the insns
1899 at m->insn forwards and see this old sequence we are
1900 tossing here. delete_insn does preserve the next
1901 pointers, but when we skip over a NOTE we must fix
1902 it up. Otherwise that code walks into the non-deleted
1903 insn stream. */
1904 while (p && GET_CODE (p) == NOTE)
1905 p = NEXT_INSN (temp) = NEXT_INSN (p);
1908 start_sequence ();
1909 emit_move_insn (m->set_dest, m->set_src);
1910 temp = get_insns ();
1911 end_sequence ();
1913 add_label_notes (m->set_src, temp);
1915 i1 = emit_insns_before (temp, loop_start);
1916 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1917 REG_NOTES (i1)
1918 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1919 m->set_src, REG_NOTES (i1));
1921 if (loop_dump_stream)
1922 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1924 /* The more regs we move, the less we like moving them. */
1925 threshold -= 3;
1927 else
1929 for (count = m->consec; count >= 0; count--)
1931 rtx i1, temp;
1933 /* If first insn of libcall sequence, skip to end. */
1934 /* Do this at start of loop, since p is guaranteed to
1935 be an insn here. */
1936 if (GET_CODE (p) != NOTE
1937 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1938 p = XEXP (temp, 0);
1940 /* If last insn of libcall sequence, move all
1941 insns except the last before the loop. The last
1942 insn is handled in the normal manner. */
1943 if (GET_CODE (p) != NOTE
1944 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1946 rtx fn_address = 0;
1947 rtx fn_reg = 0;
1948 rtx fn_address_insn = 0;
1950 first = 0;
1951 for (temp = XEXP (temp, 0); temp != p;
1952 temp = NEXT_INSN (temp))
1954 rtx body;
1955 rtx n;
1956 rtx next;
1958 if (GET_CODE (temp) == NOTE)
1959 continue;
1961 body = PATTERN (temp);
1963 /* Find the next insn after TEMP,
1964 not counting USE or NOTE insns. */
1965 for (next = NEXT_INSN (temp); next != p;
1966 next = NEXT_INSN (next))
1967 if (! (GET_CODE (next) == INSN
1968 && GET_CODE (PATTERN (next)) == USE)
1969 && GET_CODE (next) != NOTE)
1970 break;
1972 /* If that is the call, this may be the insn
1973 that loads the function address.
1975 Extract the function address from the insn
1976 that loads it into a register.
1977 If this insn was cse'd, we get incorrect code.
1979 So emit a new move insn that copies the
1980 function address into the register that the
1981 call insn will use. flow.c will delete any
1982 redundant stores that we have created. */
1983 if (GET_CODE (next) == CALL_INSN
1984 && GET_CODE (body) == SET
1985 && GET_CODE (SET_DEST (body)) == REG
1986 && (n = find_reg_note (temp, REG_EQUAL,
1987 NULL_RTX)))
1989 fn_reg = SET_SRC (body);
1990 if (GET_CODE (fn_reg) != REG)
1991 fn_reg = SET_DEST (body);
1992 fn_address = XEXP (n, 0);
1993 fn_address_insn = temp;
1995 /* We have the call insn.
1996 If it uses the register we suspect it might,
1997 load it with the correct address directly. */
1998 if (GET_CODE (temp) == CALL_INSN
1999 && fn_address != 0
2000 && reg_referenced_p (fn_reg, body))
2001 emit_insn_after (gen_move_insn (fn_reg,
2002 fn_address),
2003 fn_address_insn);
2005 if (GET_CODE (temp) == CALL_INSN)
2007 i1 = emit_call_insn_before (body, loop_start);
2008 /* Because the USAGE information potentially
2009 contains objects other than hard registers
2010 we need to copy it. */
2011 if (CALL_INSN_FUNCTION_USAGE (temp))
2012 CALL_INSN_FUNCTION_USAGE (i1)
2013 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2015 else
2016 i1 = emit_insn_before (body, loop_start);
2017 if (first == 0)
2018 first = i1;
2019 if (temp == fn_address_insn)
2020 fn_address_insn = i1;
2021 REG_NOTES (i1) = REG_NOTES (temp);
2022 delete_insn (temp);
2024 if (new_start == 0)
2025 new_start = first;
2027 if (m->savemode != VOIDmode)
2029 /* P sets REG to zero; but we should clear only
2030 the bits that are not covered by the mode
2031 m->savemode. */
2032 rtx reg = m->set_dest;
2033 rtx sequence;
2034 rtx tem;
2036 start_sequence ();
2037 tem = expand_binop
2038 (GET_MODE (reg), and_optab, reg,
2039 GEN_INT ((((HOST_WIDE_INT) 1
2040 << GET_MODE_BITSIZE (m->savemode)))
2041 - 1),
2042 reg, 1, OPTAB_LIB_WIDEN);
2043 if (tem == 0)
2044 abort ();
2045 if (tem != reg)
2046 emit_move_insn (reg, tem);
2047 sequence = gen_sequence ();
2048 end_sequence ();
2049 i1 = emit_insn_before (sequence, loop_start);
2051 else if (GET_CODE (p) == CALL_INSN)
2053 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2054 /* Because the USAGE information potentially
2055 contains objects other than hard registers
2056 we need to copy it. */
2057 if (CALL_INSN_FUNCTION_USAGE (p))
2058 CALL_INSN_FUNCTION_USAGE (i1)
2059 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2061 else if (count == m->consec && m->move_insn_first)
2063 /* The SET_SRC might not be invariant, so we must
2064 use the REG_EQUAL note. */
2065 start_sequence ();
2066 emit_move_insn (m->set_dest, m->set_src);
2067 temp = get_insns ();
2068 end_sequence ();
2070 add_label_notes (m->set_src, temp);
2072 i1 = emit_insns_before (temp, loop_start);
2073 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2074 REG_NOTES (i1)
2075 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2076 : REG_EQUAL),
2077 m->set_src, REG_NOTES (i1));
2079 else
2080 i1 = emit_insn_before (PATTERN (p), loop_start);
2082 if (REG_NOTES (i1) == 0)
2084 REG_NOTES (i1) = REG_NOTES (p);
2086 /* If there is a REG_EQUAL note present whose value
2087 is not loop invariant, then delete it, since it
2088 may cause problems with later optimization passes.
2089 It is possible for cse to create such notes
2090 like this as a result of record_jump_cond. */
2092 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2093 && ! invariant_p (XEXP (temp, 0)))
2094 remove_note (i1, temp);
2097 if (new_start == 0)
2098 new_start = i1;
2100 if (loop_dump_stream)
2101 fprintf (loop_dump_stream, " moved to %d",
2102 INSN_UID (i1));
2104 /* If library call, now fix the REG_NOTES that contain
2105 insn pointers, namely REG_LIBCALL on FIRST
2106 and REG_RETVAL on I1. */
2107 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2109 XEXP (temp, 0) = first;
2110 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2111 XEXP (temp, 0) = i1;
2114 temp = p;
2115 delete_insn (p);
2116 p = NEXT_INSN (p);
2118 /* simplify_giv_expr expects that it can walk the insns
2119 at m->insn forwards and see this old sequence we are
2120 tossing here. delete_insn does preserve the next
2121 pointers, but when we skip over a NOTE we must fix
2122 it up. Otherwise that code walks into the non-deleted
2123 insn stream. */
2124 while (p && GET_CODE (p) == NOTE)
2125 p = NEXT_INSN (temp) = NEXT_INSN (p);
2128 /* The more regs we move, the less we like moving them. */
2129 threshold -= 3;
2132 /* Any other movable that loads the same register
2133 MUST be moved. */
2134 already_moved[regno] = 1;
2136 /* This reg has been moved out of one loop. */
2137 moved_once[regno] = 1;
2139 /* The reg set here is now invariant. */
2140 if (! m->partial)
2141 VARRAY_INT (set_in_loop, regno) = 0;
2143 m->done = 1;
2145 /* Change the length-of-life info for the register
2146 to say it lives at least the full length of this loop.
2147 This will help guide optimizations in outer loops. */
2149 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2150 /* This is the old insn before all the moved insns.
2151 We can't use the moved insn because it is out of range
2152 in uid_luid. Only the old insns have luids. */
2153 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2154 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2155 REGNO_LAST_UID (regno) = INSN_UID (end);
2157 /* Combine with this moved insn any other matching movables. */
2159 if (! m->partial)
2160 for (m1 = movables; m1; m1 = m1->next)
2161 if (m1->match == m)
2163 rtx temp;
2165 /* Schedule the reg loaded by M1
2166 for replacement so that it shares the reg of M.
2167 If the modes differ (only possible in restricted
2168 circumstances), make a SUBREG.
2170 Note this assumes that the target dependent files
2171 treat REG and SUBREG equally, including within
2172 GO_IF_LEGITIMATE_ADDRESS and in all the
2173 predicates since we never verify that replacing the
2174 original register with a SUBREG results in a
2175 recognizable insn. */
2176 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2177 reg_map[m1->regno] = m->set_dest;
2178 else
2179 reg_map[m1->regno]
2180 = gen_lowpart_common (GET_MODE (m1->set_dest),
2181 m->set_dest);
2183 /* Get rid of the matching insn
2184 and prevent further processing of it. */
2185 m1->done = 1;
2187 /* If library call, delete all insns except the last, which
2188 is deleted below. */
2189 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2190 NULL_RTX)))
2192 for (temp = XEXP (temp, 0); temp != m1->insn;
2193 temp = NEXT_INSN (temp))
2194 delete_insn (temp);
2196 delete_insn (m1->insn);
2198 /* Any other movable that loads the same register
2199 MUST be moved. */
2200 already_moved[m1->regno] = 1;
2202 /* The reg merged here is now invariant,
2203 if the reg it matches is invariant. */
2204 if (! m->partial)
2205 VARRAY_INT (set_in_loop, m1->regno) = 0;
2208 else if (loop_dump_stream)
2209 fprintf (loop_dump_stream, "not desirable");
2211 else if (loop_dump_stream && !m->match)
2212 fprintf (loop_dump_stream, "not safe");
2214 if (loop_dump_stream)
2215 fprintf (loop_dump_stream, "\n");
2218 if (new_start == 0)
2219 new_start = loop_start;
2221 /* Go through all the instructions in the loop, making
2222 all the register substitutions scheduled in REG_MAP. */
2223 for (p = new_start; p != end; p = NEXT_INSN (p))
2224 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2225 || GET_CODE (p) == CALL_INSN)
2227 replace_regs (PATTERN (p), reg_map, nregs, 0);
2228 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2229 INSN_CODE (p) = -1;
2233 #if 0
2234 /* Scan X and replace the address of any MEM in it with ADDR.
2235 REG is the address that MEM should have before the replacement. */
2237 static void
2238 replace_call_address (x, reg, addr)
2239 rtx x, reg, addr;
2241 register enum rtx_code code;
2242 register int i;
2243 register const char *fmt;
2245 if (x == 0)
2246 return;
2247 code = GET_CODE (x);
2248 switch (code)
2250 case PC:
2251 case CC0:
2252 case CONST_INT:
2253 case CONST_DOUBLE:
2254 case CONST:
2255 case SYMBOL_REF:
2256 case LABEL_REF:
2257 case REG:
2258 return;
2260 case SET:
2261 /* Short cut for very common case. */
2262 replace_call_address (XEXP (x, 1), reg, addr);
2263 return;
2265 case CALL:
2266 /* Short cut for very common case. */
2267 replace_call_address (XEXP (x, 0), reg, addr);
2268 return;
2270 case MEM:
2271 /* If this MEM uses a reg other than the one we expected,
2272 something is wrong. */
2273 if (XEXP (x, 0) != reg)
2274 abort ();
2275 XEXP (x, 0) = addr;
2276 return;
2278 default:
2279 break;
2282 fmt = GET_RTX_FORMAT (code);
2283 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2285 if (fmt[i] == 'e')
2286 replace_call_address (XEXP (x, i), reg, addr);
2287 if (fmt[i] == 'E')
2289 register int j;
2290 for (j = 0; j < XVECLEN (x, i); j++)
2291 replace_call_address (XVECEXP (x, i, j), reg, addr);
2295 #endif
2297 /* Return the number of memory refs to addresses that vary
2298 in the rtx X. */
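/* For instance, given the (made-up) reference

       (mem:SI (plus:SI (reg:SI 117) (const_int 8)))

   the MEM counts as one nonfixed read when invariant_p on its
   address returns something other than 1, plus whatever nonfixed
   reads are nested inside the address itself.  */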
2300 static int
2301 count_nonfixed_reads (x)
2302 rtx x;
2304 register enum rtx_code code;
2305 register int i;
2306 register const char *fmt;
2307 int value;
2309 if (x == 0)
2310 return 0;
2312 code = GET_CODE (x);
2313 switch (code)
2315 case PC:
2316 case CC0:
2317 case CONST_INT:
2318 case CONST_DOUBLE:
2319 case CONST:
2320 case SYMBOL_REF:
2321 case LABEL_REF:
2322 case REG:
2323 return 0;
2325 case MEM:
2326 return ((invariant_p (XEXP (x, 0)) != 1)
2327 + count_nonfixed_reads (XEXP (x, 0)));
2329 default:
2330 break;
2333 value = 0;
2334 fmt = GET_RTX_FORMAT (code);
2335 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2337 if (fmt[i] == 'e')
2338 value += count_nonfixed_reads (XEXP (x, i));
2339 if (fmt[i] == 'E')
2341 register int j;
2342 for (j = 0; j < XVECLEN (x, i); j++)
2343 value += count_nonfixed_reads (XVECEXP (x, i, j));
2346 return value;
2350 #if 0
2351 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2352 Replace it with an instruction to load just the low bytes
2353 if the machine supports such an instruction,
2354 and insert above LOOP_START an instruction to clear the register. */
2356 static void
2357 constant_high_bytes (p, loop_start)
2358 rtx p, loop_start;
2360 register rtx new;
2361 register int insn_code_number;
2363 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2364 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2366 new
2367 = gen_rtx_SET
2368 (VOIDmode,
2369 gen_rtx_STRICT_LOW_PART
2370 (VOIDmode,
2371 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2372 SET_DEST (PATTERN (p)), 0)),
2373 XEXP (SET_SRC (PATTERN (p)), 0));
2375 insn_code_number = recog (new, p);
2377 if (insn_code_number)
2379 register int i;
2381 /* Clear destination register before the loop. */
2382 emit_insn_before (gen_rtx_SET (VOIDmode,
2383 SET_DEST (PATTERN (p)), const0_rtx),
2384 loop_start);
2386 /* Inside the loop, just load the low part. */
2387 PATTERN (p) = new;
2390 #endif
2392 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2393 `has_call', `has_volatile', and `has_tablejump' within LOOP_INFO.
2394 Set the global variables `unknown_address_altered',
2395 `unknown_constant_address_altered', and `num_mem_sets'. Also, fill
2396 in the array `loop_mems' and the list `loop_store_mems'. */
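/* The LEVEL counter below tracks NOTE_INSN_LOOP_BEG/END nesting.
   Schematically, for one nested loop (START itself is not scanned):

       LOOP_BEG     level 1 -> 2, loops_enclosed becomes 2
       ...
       LOOP_END     level 2 -> 1
       LOOP_END     level 1 -> 0: record END and stop

   CONT and VTOP notes are recorded only at level 1, i.e. only when
   they belong to the loop being prescanned itself.  */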
2398 static void
2399 prescan_loop (start, end, loop_info)
2400 rtx start, end;
2401 struct loop_info *loop_info;
2403 register int level = 1;
2404 rtx insn;
2405 /* The label after END. Jumping here is just like falling off the
2406 end of the loop. We use next_nonnote_insn instead of next_label
2407 as a hedge against the (pathological) case where some actual insn
2408 might end up between the two. */
2409 rtx exit_target = next_nonnote_insn (end);
2411 loop_info->num = uid_loop_num [INSN_UID (start)];
2412 loop_info->has_indirect_jump = indirect_jump_in_function;
2413 loop_info->has_call = 0;
2414 loop_info->has_volatile = 0;
2415 loop_info->has_tablejump = 0;
2416 loop_info->loops_enclosed = 1;
2417 loop_info->has_multiple_exit_targets = 0;
2418 loop_info->cont = 0;
2419 loop_info->vtop = 0;
2421 unknown_address_altered = 0;
2422 unknown_constant_address_altered = 0;
2423 loop_store_mems = NULL_RTX;
2424 first_loop_store_insn = NULL_RTX;
2425 loop_mems_idx = 0;
2426 num_mem_sets = 0;
2428 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2429 insn = NEXT_INSN (insn))
2431 if (GET_CODE (insn) == NOTE)
2433 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2435 ++level;
2436 /* Count number of loops contained in this one. */
2437 loop_info->loops_enclosed++;
2439 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2441 --level;
2442 if (level == 0)
2444 end = insn;
2445 break;
2448 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2450 if (level == 1)
2451 loop_info->cont = insn;
2453 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
2455 /* If there is a NOTE_INSN_LOOP_VTOP, then this is a for
2456 or while style loop, with a loop exit test at the
2457 start. Thus, we can assume that the loop condition
2458 was true when the loop was entered. */
2459 if (level == 1)
2460 loop_info->vtop = insn;
2463 else if (GET_CODE (insn) == CALL_INSN)
2465 if (! CONST_CALL_P (insn))
2466 unknown_address_altered = 1;
2467 loop_info->has_call = 1;
2469 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2471 rtx label1 = NULL_RTX;
2472 rtx label2 = NULL_RTX;
2474 if (volatile_refs_p (PATTERN (insn)))
2475 loop_info->has_volatile = 1;
2477 if (GET_CODE (insn) == JUMP_INSN
2478 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2479 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2480 loop_info->has_tablejump = 1;
2482 note_stores (PATTERN (insn), note_addr_stored, NULL);
2483 if (! first_loop_store_insn && loop_store_mems)
2484 first_loop_store_insn = insn;
2486 if (! loop_info->has_multiple_exit_targets
2487 && GET_CODE (insn) == JUMP_INSN
2488 && GET_CODE (PATTERN (insn)) == SET
2489 && SET_DEST (PATTERN (insn)) == pc_rtx)
2491 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2493 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2494 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2496 else
2498 label1 = SET_SRC (PATTERN (insn));
2501 do {
2502 if (label1 && label1 != pc_rtx)
2504 if (GET_CODE (label1) != LABEL_REF)
2506 /* Something tricky. */
2507 loop_info->has_multiple_exit_targets = 1;
2508 break;
2510 else if (XEXP (label1, 0) != exit_target
2511 && LABEL_OUTSIDE_LOOP_P (label1))
2513 /* A jump outside the current loop. */
2514 loop_info->has_multiple_exit_targets = 1;
2515 break;
2519 label1 = label2;
2520 label2 = NULL_RTX;
2521 } while (label1);
2524 else if (GET_CODE (insn) == RETURN)
2525 loop_info->has_multiple_exit_targets = 1;
2528 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2529 if (/* We can't tell what MEMs are aliased by what. */
2530 !unknown_address_altered
2531 /* An exception thrown by a called function might land us
2532 anywhere. */
2533 && !loop_info->has_call
2534 /* We don't want loads for MEMs moved to a location before the
2535 one at which their stack memory becomes allocated. (Note
2536 that this is not a problem for malloc, etc., since those
2537 require actual function calls.) */
2538 && !current_function_calls_alloca
2539 /* There are ways to leave the loop other than falling off the
2540 end. */
2541 && !loop_info->has_multiple_exit_targets)
2542 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2543 insn = NEXT_INSN (insn))
2544 for_each_rtx (&insn, insert_loop_mem, 0);
2547 /* LOOP_NUMBER_CONT_DOMINATOR is now the last label between the loop start
2548 and the continue note that is the destination of a (cond)jump after
2549 the continue note. If there is any (cond)jump between the loop start
2550 and what we have so far as LOOP_NUMBER_CONT_DOMINATOR that has a
2551 target between LOOP_NUMBER_CONT_DOMINATOR and the continue note, move
2552 LOOP_NUMBER_CONT_DOMINATOR forward to that label; if a jump's
2553 destination cannot be determined, clear LOOP_NUMBER_CONT_DOMINATOR. */
2555 static void
2556 verify_dominator (loop_number)
2557 int loop_number;
2559 rtx insn;
2561 if (! loop_number_cont_dominator[loop_number])
2562 /* This can happen for an empty loop, e.g. in
2563 gcc.c-torture/compile/920410-2.c */
2564 return;
2565 if (loop_number_cont_dominator[loop_number] == const0_rtx)
2567 loop_number_cont_dominator[loop_number] = 0;
2568 return;
2570 for (insn = loop_number_loop_starts[loop_number];
2571 insn != loop_number_cont_dominator[loop_number];
2572 insn = NEXT_INSN (insn))
2574 if (GET_CODE (insn) == JUMP_INSN
2575 && GET_CODE (PATTERN (insn)) != RETURN)
2577 rtx label = JUMP_LABEL (insn);
2578 int label_luid;
2580 /* If it is not a jump we can easily understand or for
2581 which we do not have jump target information in the JUMP_LABEL
2582 field (consider ADDR_VEC and ADDR_DIFF_VEC insns), then clear
2583 LOOP_NUMBER_CONT_DOMINATOR. */
2584 if ((! condjump_p (insn)
2585 && ! condjump_in_parallel_p (insn))
2586 || label == NULL_RTX)
2588 loop_number_cont_dominator[loop_number] = NULL_RTX;
2589 return;
2592 label_luid = INSN_LUID (label);
2593 if (label_luid < INSN_LUID (loop_number_loop_cont[loop_number])
2594 && (label_luid
2595 > INSN_LUID (loop_number_cont_dominator[loop_number])))
2596 loop_number_cont_dominator[loop_number] = label;
2601 /* Scan the function looking for loops. Record the start and end of each loop.
2602 Also mark as invalid loops any loops that contain a setjmp or are branched
2603 to from outside the loop. */
2605 static void
2606 find_and_verify_loops (f)
2607 rtx f;
2609 rtx insn, label;
2610 int current_loop = -1;
2611 int next_loop = -1;
2612 int loop;
2614 compute_luids (f, NULL_RTX, 0);
2616 /* If there are jumps to undefined labels,
2617 treat them as jumps out of any/all loops.
2618 This also avoids writing past end of tables when there are no loops. */
2619 uid_loop_num[0] = -1;
2621 /* Find boundaries of loops, mark which loops are contained within
2622 loops, and invalidate loops that have setjmp. */
2624 for (insn = f; insn; insn = NEXT_INSN (insn))
2626 if (GET_CODE (insn) == NOTE)
2627 switch (NOTE_LINE_NUMBER (insn))
2629 case NOTE_INSN_LOOP_BEG:
2630 loop_number_loop_starts[++next_loop] = insn;
2631 loop_number_loop_ends[next_loop] = 0;
2632 loop_number_loop_cont[next_loop] = 0;
2633 loop_number_cont_dominator[next_loop] = 0;
2634 loop_outer_loop[next_loop] = current_loop;
2635 loop_invalid[next_loop] = 0;
2636 loop_number_exit_labels[next_loop] = 0;
2637 loop_number_exit_count[next_loop] = 0;
2638 current_loop = next_loop;
2639 break;
2641 case NOTE_INSN_SETJMP:
2642 /* In this case, we must invalidate our current loop and any
2643 enclosing loop. */
2644 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2646 loop_invalid[loop] = 1;
2647 if (loop_dump_stream)
2648 fprintf (loop_dump_stream,
2649 "\nLoop at %d ignored due to setjmp.\n",
2650 INSN_UID (loop_number_loop_starts[loop]));
2652 break;
2654 case NOTE_INSN_LOOP_CONT:
2655 loop_number_loop_cont[current_loop] = insn;
2656 break;
2657 case NOTE_INSN_LOOP_END:
2658 if (current_loop == -1)
2659 abort ();
2661 loop_number_loop_ends[current_loop] = insn;
2662 verify_dominator (current_loop);
2663 current_loop = loop_outer_loop[current_loop];
2664 break;
2666 default:
2667 break;
2669 /* If for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT
2670 and NOTE_INSN_LOOP_END notes, update loop_number_cont_dominator. */
2671 else if (GET_CODE (insn) == JUMP_INSN
2672 && GET_CODE (PATTERN (insn)) != RETURN
2673 && current_loop >= 0)
2675 int this_loop_num;
2676 rtx label = JUMP_LABEL (insn);
2678 if (! condjump_p (insn) && ! condjump_in_parallel_p (insn))
2679 label = NULL_RTX;
2681 this_loop_num = current_loop;
2682 do
2684 /* First see if we care about this loop. */
2685 if (loop_number_loop_cont[this_loop_num]
2686 && loop_number_cont_dominator[this_loop_num] != const0_rtx)
2688 /* If the jump destination is not known, invalidate
2689 loop_number_cont_dominator. */
2690 if (! label)
2691 loop_number_cont_dominator[this_loop_num] = const0_rtx;
2692 else
2693 /* Check if the destination is between loop start and
2694 cont. */
2695 if ((INSN_LUID (label)
2696 < INSN_LUID (loop_number_loop_cont[this_loop_num]))
2697 && (INSN_LUID (label)
2698 > INSN_LUID (loop_number_loop_starts[this_loop_num]))
2699 /* And if there is no later destination already
2700 recorded. */
2701 && (! loop_number_cont_dominator[this_loop_num]
2702 || (INSN_LUID (label)
2703 > INSN_LUID (loop_number_cont_dominator
2704 [this_loop_num]))))
2705 loop_number_cont_dominator[this_loop_num] = label;
2707 this_loop_num = loop_outer_loop[this_loop_num];
2709 while (this_loop_num >= 0);
2712 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2713 enclosing loop, but this doesn't matter. */
2714 uid_loop_num[INSN_UID (insn)] = current_loop;
2717 /* Any loop containing a label used in an initializer must be invalidated,
2718 because it can be jumped into from anywhere. */
2720 for (label = forced_labels; label; label = XEXP (label, 1))
2722 int loop_num;
2724 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2725 loop_num != -1;
2726 loop_num = loop_outer_loop[loop_num])
2727 loop_invalid[loop_num] = 1;
2730 /* Any loop containing a label used for an exception handler must be
2731 invalidated, because it can be jumped into from anywhere. */
2733 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2735 int loop_num;
2737 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2738 loop_num != -1;
2739 loop_num = loop_outer_loop[loop_num])
2740 loop_invalid[loop_num] = 1;
2743 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2744 loop that it is not contained within, that loop is marked invalid.
2745 If any INSN or CALL_INSN uses a label's address, then the loop containing
2746 that label is marked invalid, because it could be jumped into from
2747 anywhere.
2749 Also look for blocks of code ending in an unconditional branch that
2750 exits the loop. If such a block is surrounded by a conditional
2751 branch around the block, move the block elsewhere (see below) and
2752 invert the jump to point to the code block. This may eliminate a
2753 label in our loop and will simplify processing by both us and a
2754 possible second cse pass. */
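/* Schematically (labels invented for illustration):

       p:      beq L1        ; conditional branch around the block
               jmp L_out     ; block ends in a jump out of the loop
       L1:     ...

   becomes, once P is inverted and the block is moved next to a
   BARRIER at the target's loop depth,

       p:      bne L_new     ; inverted, targets the moved block
       L1:     ...
       ...
       L_new:  jmp L_out     ; now outside the loop

   removing a jump, and often the label L1, from the loop body.  */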
2756 for (insn = f; insn; insn = NEXT_INSN (insn))
2757 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2759 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2761 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2763 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2764 if (note)
2766 int loop_num;
2768 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2769 loop_num != -1;
2770 loop_num = loop_outer_loop[loop_num])
2771 loop_invalid[loop_num] = 1;
2775 if (GET_CODE (insn) != JUMP_INSN)
2776 continue;
2778 mark_loop_jump (PATTERN (insn), this_loop_num);
2780 /* See if this is an unconditional branch outside the loop. */
2781 if (this_loop_num != -1
2782 && (GET_CODE (PATTERN (insn)) == RETURN
2783 || (simplejump_p (insn)
2784 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2785 != this_loop_num)))
2786 && get_max_uid () < max_uid_for_loop)
2788 rtx p;
2789 rtx our_next = next_real_insn (insn);
2790 rtx last_insn_to_move = NEXT_INSN (insn);
2791 int dest_loop;
2792 int outer_loop = -1;
2794 /* Go backwards until we reach the start of the loop, a label,
2795 or a JUMP_INSN. */
2796 for (p = PREV_INSN (insn);
2797 GET_CODE (p) != CODE_LABEL
2798 && ! (GET_CODE (p) == NOTE
2799 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2800 && GET_CODE (p) != JUMP_INSN;
2801 p = PREV_INSN (p))
2802 ;
2804 /* Check for the case where we have a jump to an inner nested
2805 loop, and do not perform the optimization in that case. */
2807 if (JUMP_LABEL (insn))
2809 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2810 if (dest_loop != -1)
2812 for (outer_loop = dest_loop; outer_loop != -1;
2813 outer_loop = loop_outer_loop[outer_loop])
2814 if (outer_loop == this_loop_num)
2815 break;
2819 /* Make sure that the target of P is within the current loop. */
2821 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2822 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2823 outer_loop = this_loop_num;
2825 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2826 we have a block of code to try to move.
2828 We look backward and then forward from the target of INSN
2829 to find a BARRIER at the same loop depth as the target.
2830 If we find such a BARRIER, we make a new label for the start
2831 of the block, invert the jump in P and point it to that label,
2832 and move the block of code to the spot we found. */
2834 if (outer_loop == -1
2835 && GET_CODE (p) == JUMP_INSN
2836 && JUMP_LABEL (p) != 0
2837 /* Just ignore jumps to labels that were never emitted.
2838 These always indicate compilation errors. */
2839 && INSN_UID (JUMP_LABEL (p)) != 0
2840 && condjump_p (p)
2841 && ! simplejump_p (p)
2842 && next_real_insn (JUMP_LABEL (p)) == our_next
2843 /* If it's not safe to move the sequence, then we
2844 mustn't try. */
2845 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2846 &last_insn_to_move))
2848 rtx target
2849 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2850 int target_loop_num = uid_loop_num[INSN_UID (target)];
2851 rtx loc, loc2;
2853 for (loc = target; loc; loc = PREV_INSN (loc))
2854 if (GET_CODE (loc) == BARRIER
2855 /* Don't move things inside a tablejump. */
2856 && ((loc2 = next_nonnote_insn (loc)) == 0
2857 || GET_CODE (loc2) != CODE_LABEL
2858 || (loc2 = next_nonnote_insn (loc2)) == 0
2859 || GET_CODE (loc2) != JUMP_INSN
2860 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2861 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2862 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2863 break;
2865 if (loc == 0)
2866 for (loc = target; loc; loc = NEXT_INSN (loc))
2867 if (GET_CODE (loc) == BARRIER
2868 /* Don't move things inside a tablejump. */
2869 && ((loc2 = next_nonnote_insn (loc)) == 0
2870 || GET_CODE (loc2) != CODE_LABEL
2871 || (loc2 = next_nonnote_insn (loc2)) == 0
2872 || GET_CODE (loc2) != JUMP_INSN
2873 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2874 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2875 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2876 break;
2878 if (loc)
2880 rtx cond_label = JUMP_LABEL (p);
2881 rtx new_label = get_label_after (p);
2883 /* Ensure our label doesn't go away. */
2884 LABEL_NUSES (cond_label)++;
2886 /* Verify that uid_loop_num is large enough and that
2887 we can invert P. */
2888 if (invert_jump (p, new_label))
2890 rtx q, r;
2892 /* If no suitable BARRIER was found, create a suitable
2893 one before TARGET. Since TARGET is a fall through
2894 path, we'll need to insert a jump around our block
2895 and add a BARRIER before TARGET.
2897 This creates an extra unconditional jump outside
2898 the loop. However, the benefits of removing rarely
2899 executed instructions from inside the loop usually
2900 outweigh the cost of the extra unconditional jump
2901 outside the loop. */
2902 if (loc == 0)
2904 rtx temp;
2906 temp = gen_jump (JUMP_LABEL (insn));
2907 temp = emit_jump_insn_before (temp, target);
2908 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2909 LABEL_NUSES (JUMP_LABEL (insn))++;
2910 loc = emit_barrier_before (target);
2913 /* Include the BARRIER after INSN and copy the
2914 block after LOC. */
2915 new_label = squeeze_notes (new_label,
2916 last_insn_to_move);
2917 reorder_insns (new_label, last_insn_to_move, loc);
2919 /* All those insns are now in TARGET_LOOP_NUM. */
2920 for (q = new_label;
2921 q != NEXT_INSN (last_insn_to_move);
2922 q = NEXT_INSN (q))
2923 uid_loop_num[INSN_UID (q)] = target_loop_num;
2925 /* The label jumped to by INSN is no longer a loop exit.
2926 Unless INSN does not have a label (e.g., it is a
2927 RETURN insn), search loop_number_exit_labels to find
2928 its label_ref, and remove it. Also turn off
2929 LABEL_OUTSIDE_LOOP_P bit. */
2930 if (JUMP_LABEL (insn))
2932 int loop_num;
2934 for (q = 0,
2935 r = loop_number_exit_labels[this_loop_num];
2936 r; q = r, r = LABEL_NEXTREF (r))
2937 if (XEXP (r, 0) == JUMP_LABEL (insn))
2939 LABEL_OUTSIDE_LOOP_P (r) = 0;
2940 if (q)
2941 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2942 else
2943 loop_number_exit_labels[this_loop_num]
2944 = LABEL_NEXTREF (r);
2945 break;
2948 for (loop_num = this_loop_num;
2949 loop_num != -1 && loop_num != target_loop_num;
2950 loop_num = loop_outer_loop[loop_num])
2951 loop_number_exit_count[loop_num]--;
2953 /* If we didn't find it, then something is wrong. */
2954 if (! r)
2955 abort ();
2958 /* P is now a jump outside the loop, so it must be put
2959 in loop_number_exit_labels, and marked as such.
2960 The easiest way to do this is to just call
2961 mark_loop_jump again for P. */
2962 mark_loop_jump (PATTERN (p), this_loop_num);
2964 /* If INSN now jumps to the insn after it,
2965 delete INSN. */
2966 if (JUMP_LABEL (insn) != 0
2967 && (next_real_insn (JUMP_LABEL (insn))
2968 == next_real_insn (insn)))
2969 delete_insn (insn);
2972 /* Continue the loop after where the conditional
2973 branch used to jump, since the only branch insn
2974 in the block (if it still remains) is an inter-loop
2975 branch and hence needs no processing. */
2976 insn = NEXT_INSN (cond_label);
2978 if (--LABEL_NUSES (cond_label) == 0)
2979 delete_insn (cond_label);
2981 /* This loop will be continued with NEXT_INSN (insn). */
2982 insn = PREV_INSN (insn);
2989 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2990 loops it is contained in, mark the target loop invalid.
2992 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2994 static void
2995 mark_loop_jump (x, loop_num)
2996 rtx x;
2997 int loop_num;
2999 int dest_loop;
3000 int outer_loop;
3001 int i;
3003 switch (GET_CODE (x))
3005 case PC:
3006 case USE:
3007 case CLOBBER:
3008 case REG:
3009 case MEM:
3010 case CONST_INT:
3011 case CONST_DOUBLE:
3012 case RETURN:
3013 return;
3015 case CONST:
3016 /* There could be a label reference in here. */
3017 mark_loop_jump (XEXP (x, 0), loop_num);
3018 return;
3020 case PLUS:
3021 case MINUS:
3022 case MULT:
3023 mark_loop_jump (XEXP (x, 0), loop_num);
3024 mark_loop_jump (XEXP (x, 1), loop_num);
3025 return;
3027 case LO_SUM:
3028 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3029 mark_loop_jump (XEXP (x, 1), loop_num);
3030 return;
3032 case SIGN_EXTEND:
3033 case ZERO_EXTEND:
3034 mark_loop_jump (XEXP (x, 0), loop_num);
3035 return;
3037 case LABEL_REF:
3038 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
3040 /* Link together all labels that branch outside the loop. This
3041 is used by final_[bg]iv_value and the loop unrolling code. Also
3042 mark this LABEL_REF so we know that this branch should predict
3043 false. */
3045 /* A check to make sure the label is not in an inner nested loop,
3046 since this does not count as a loop exit. */
3047 if (dest_loop != -1)
3049 for (outer_loop = dest_loop; outer_loop != -1;
3050 outer_loop = loop_outer_loop[outer_loop])
3051 if (outer_loop == loop_num)
3052 break;
3054 else
3055 outer_loop = -1;
3057 if (loop_num != -1 && outer_loop == -1)
3059 LABEL_OUTSIDE_LOOP_P (x) = 1;
3060 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
3061 loop_number_exit_labels[loop_num] = x;
3063 for (outer_loop = loop_num;
3064 outer_loop != -1 && outer_loop != dest_loop;
3065 outer_loop = loop_outer_loop[outer_loop])
3066 loop_number_exit_count[outer_loop]++;
3069 /* If this is inside a loop, but not in the current loop or one enclosed
3070 by it, it invalidates at least one loop. */
3072 if (dest_loop == -1)
3073 return;
3075 /* We must invalidate every nested loop containing the target of this
3076 label, except those that also contain the jump insn. */
3078 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
3080 /* Stop when we reach a loop that also contains the jump insn. */
3081 for (outer_loop = loop_num; outer_loop != -1;
3082 outer_loop = loop_outer_loop[outer_loop])
3083 if (dest_loop == outer_loop)
3084 return;
3086 /* If we get here, we know we need to invalidate a loop. */
3087 if (loop_dump_stream && ! loop_invalid[dest_loop])
3088 fprintf (loop_dump_stream,
3089 "\nLoop at %d ignored due to multiple entry points.\n",
3090 INSN_UID (loop_number_loop_starts[dest_loop]));
3092 loop_invalid[dest_loop] = 1;
3094 return;
3096 case SET:
3097 /* If this is not setting pc, ignore. */
3098 if (SET_DEST (x) == pc_rtx)
3099 mark_loop_jump (SET_SRC (x), loop_num);
3100 return;
3102 case IF_THEN_ELSE:
3103 mark_loop_jump (XEXP (x, 1), loop_num);
3104 mark_loop_jump (XEXP (x, 2), loop_num);
3105 return;
3107 case PARALLEL:
3108 case ADDR_VEC:
3109 for (i = 0; i < XVECLEN (x, 0); i++)
3110 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
3111 return;
3113 case ADDR_DIFF_VEC:
3114 for (i = 0; i < XVECLEN (x, 1); i++)
3115 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
3116 return;
3118 default:
3119 /* Strictly speaking this is not a jump into the loop, only a possible
3120 jump out of the loop. However, we have no way to link the destination
3121 of this jump onto the list of exit labels. To be safe we mark this
3122 loop and any containing loops as invalid. */
3123 if (loop_num != -1)
3125 for (outer_loop = loop_num; outer_loop != -1;
3126 outer_loop = loop_outer_loop[outer_loop])
3128 if (loop_dump_stream && ! loop_invalid[outer_loop])
3129 fprintf (loop_dump_stream,
3130 "\nLoop at %d ignored due to unknown exit jump.\n",
3131 INSN_UID (loop_number_loop_starts[outer_loop]));
3132 loop_invalid[outer_loop] = 1;
3135 return;
3139 /* Return nonzero if there is a label in the range from
3140 insn INSN to and including the insn whose luid is END.
3141 INSN must have an assigned luid (i.e., it must not have
3142 been previously created by loop.c). */
3144 static int
3145 labels_in_range_p (insn, end)
3146 rtx insn;
3147 int end;
3149 while (insn && INSN_LUID (insn) <= end)
3151 if (GET_CODE (insn) == CODE_LABEL)
3152 return 1;
3153 insn = NEXT_INSN (insn);
3156 return 0;
3159 /* Record that a memory reference X is being set. */
3161 static void
3162 note_addr_stored (x, y, data)
3163 rtx x;
3164 rtx y ATTRIBUTE_UNUSED;
3165 void *data ATTRIBUTE_UNUSED;
3167 if (x == 0 || GET_CODE (x) != MEM)
3168 return;
3170 /* Count number of memory writes.
3171 This affects heuristics in strength_reduce. */
3172 num_mem_sets++;
3174 /* BLKmode MEM means all memory is clobbered. */
3175 if (GET_MODE (x) == BLKmode)
3177 if (RTX_UNCHANGING_P (x))
3178 unknown_constant_address_altered = 1;
3179 else
3180 unknown_address_altered = 1;
3182 return;
3185 loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems);
3188 /* X is a value modified by an INSN that references a biv inside a loop
3189 exit test (i.e., X is somehow related to the value of the biv). If X
3190 is a pseudo that is used more than once, then the biv is (effectively)
3191 used more than once. DATA is really an `int *', and is set if the
3192 biv is used more than once. */
3194 static void
3195 note_set_pseudo_multiple_uses (x, y, data)
3196 rtx x;
3197 rtx y ATTRIBUTE_UNUSED;
3198 void *data;
3200 if (x == 0)
3201 return;
3203 while (GET_CODE (x) == STRICT_LOW_PART
3204 || GET_CODE (x) == SIGN_EXTRACT
3205 || GET_CODE (x) == ZERO_EXTRACT
3206 || GET_CODE (x) == SUBREG)
3207 x = XEXP (x, 0);
3209 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3210 return;
3212 /* If we do not have usage information, or if we know the register
3213 is used more than once, note that fact for check_dbra_loop. */
3214 if (REGNO (x) >= max_reg_before_loop
3215 || ! VARRAY_RTX (reg_single_usage, REGNO (x))
3216 || VARRAY_RTX (reg_single_usage, REGNO (x)) == const0_rtx)
3217 *((int *) data) = 1;
3220 /* Return nonzero if the rtx X is invariant over the current loop.
3222 The value is 2 if we refer to something only conditionally invariant.
3224 If `unknown_address_altered' is nonzero, no memory ref is invariant.
3225 Otherwise, a memory ref is invariant if it does not conflict with
3226 anything stored in `loop_store_mems'. */
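/* For a hypothetical (plus:SI (reg:SI 117) (const_int 4)): the
   result is 1 when reg 117 is never set in the loop (set_in_loop
   == 0), 2 when its sets were only recognized as conditionally
   invariant (set_in_loop < 0), and 0 otherwise.  Callers that test
   `1 == invariant_p (...)' thereby insist on unconditional
   invariance.  */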
3228 int
3229 invariant_p (x)
3230 register rtx x;
3232 register int i;
3233 register enum rtx_code code;
3234 register const char *fmt;
3235 int conditional = 0;
3236 rtx mem_list_entry;
3238 if (x == 0)
3239 return 1;
3240 code = GET_CODE (x);
3241 switch (code)
3243 case CONST_INT:
3244 case CONST_DOUBLE:
3245 case SYMBOL_REF:
3246 case CONST:
3247 return 1;
3249 case LABEL_REF:
3250 /* A LABEL_REF is normally invariant, however, if we are unrolling
3251 loops, and this label is inside the loop, then it isn't invariant.
3252 This is because each unrolled copy of the loop body will have
3253 a copy of this label. If this was invariant, then an insn loading
3254 the address of this label into a register might get moved outside
3255 the loop, and then each loop body would end up using the same label.
3257 We don't know the loop bounds here though, so just fail for all
3258 labels. */
3259 if (flag_unroll_loops)
3260 return 0;
3261 else
3262 return 1;
3264 case PC:
3265 case CC0:
3266 case UNSPEC_VOLATILE:
3267 return 0;
3269 case REG:
3270 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3271 since the reg might be set by initialization within the loop. */
3273 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3274 || x == arg_pointer_rtx)
3275 && ! current_function_has_nonlocal_goto)
3276 return 1;
3278 if (this_loop_info.has_call
3279 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3280 return 0;
3282 if (VARRAY_INT (set_in_loop, REGNO (x)) < 0)
3283 return 2;
3285 return VARRAY_INT (set_in_loop, REGNO (x)) == 0;
3287 case MEM:
3288 /* If we had a subroutine call, any location in memory could
3289 have been clobbered. We used to test here for volatile and
3290 readonly, but true_dependence knows how to do that better
3291 than we do. */
3292 if (RTX_UNCHANGING_P (x)
3293 ? unknown_constant_address_altered : unknown_address_altered)
3294 return 0;
3296 /* See if there is any dependence between a store and this load. */
3297 mem_list_entry = loop_store_mems;
3298 while (mem_list_entry)
3300 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3301 x, rtx_varies_p))
3302 return 0;
3304 mem_list_entry = XEXP (mem_list_entry, 1);
3307 /* It's not invalidated by a store in memory
3308 but we must still verify the address is invariant. */
3309 break;
3311 case ASM_OPERANDS:
3312 /* Don't mess with insns declared volatile. */
3313 if (MEM_VOLATILE_P (x))
3314 return 0;
3315 break;
3317 default:
3318 break;
3321 fmt = GET_RTX_FORMAT (code);
3322 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3324 if (fmt[i] == 'e')
3326 int tem = invariant_p (XEXP (x, i));
3327 if (tem == 0)
3328 return 0;
3329 if (tem == 2)
3330 conditional = 1;
3332 else if (fmt[i] == 'E')
3334 register int j;
3335 for (j = 0; j < XVECLEN (x, i); j++)
3337 int tem = invariant_p (XVECEXP (x, i, j));
3338 if (tem == 0)
3339 return 0;
3340 if (tem == 2)
3341 conditional = 1;
3347 return 1 + conditional;
3351 /* Return nonzero if all the insns in the loop that set REG
3352 are INSN and the immediately following insns,
3353 and if each of those insns sets REG in an invariant way
3354 (not counting uses of REG in them).
3356 The value is 2 if some of these insns are only conditionally invariant.
3358 We assume that INSN itself is the first set of REG
3359 and that its source is invariant. */
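/* A typical consecutive-set group (sketch):

       (set (reg:SI 117) (const_int 0))
       (set (reg:SI 117) (ior:SI (reg:SI 117) (const_int 1)))

   The second set uses REG itself, which is why set_in_loop for
   REGNO is zeroed for the duration of the scan below: uses of REG
   inside the group must not count against invariance.  */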
3361 static int
3362 consec_sets_invariant_p (reg, n_sets, insn)
3363 int n_sets;
3364 rtx reg, insn;
3366 register rtx p = insn;
3367 register int regno = REGNO (reg);
3368 rtx temp;
3369 /* Number of sets we have to insist on finding after INSN. */
3370 int count = n_sets - 1;
3371 int old = VARRAY_INT (set_in_loop, regno);
3372 int value = 0;
3373 int this;
3375 /* If N_SETS hit the limit, we can't rely on its value. */
3376 if (n_sets == 127)
3377 return 0;
3379 VARRAY_INT (set_in_loop, regno) = 0;
3381 while (count > 0)
3383 register enum rtx_code code;
3384 rtx set;
3386 p = NEXT_INSN (p);
3387 code = GET_CODE (p);
3389 /* If library call, skip to end of it. */
3390 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3391 p = XEXP (temp, 0);
3393 this = 0;
3394 if (code == INSN
3395 && (set = single_set (p))
3396 && GET_CODE (SET_DEST (set)) == REG
3397 && REGNO (SET_DEST (set)) == regno)
3399 this = invariant_p (SET_SRC (set));
3400 if (this != 0)
3401 value |= this;
3402 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3404 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3405 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3406 notes are OK. */
3407 this = (CONSTANT_P (XEXP (temp, 0))
3408 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3409 && invariant_p (XEXP (temp, 0))));
3410 if (this != 0)
3411 value |= this;
3414 if (this != 0)
3415 count--;
3416 else if (code != NOTE)
3418 VARRAY_INT (set_in_loop, regno) = old;
3419 return 0;
3423 VARRAY_INT (set_in_loop, regno) = old;
3424 /* If invariant_p ever returned 2, we return 2. */
3425 return 1 + (value & 2);
3428 #if 0
3429 /* I don't think this condition is sufficient to allow INSN
3430 to be moved, so we no longer test it. */
3432 /* Return 1 if all insns in the basic block of INSN and following INSN
3433 that set REG are invariant according to TABLE. */
3435 static int
3436 all_sets_invariant_p (reg, insn, table)
3437 rtx reg, insn;
3438 short *table;
3440 register rtx p = insn;
3441 register int regno = REGNO (reg);
3443 while (1)
3445 register enum rtx_code code;
3446 p = NEXT_INSN (p);
3447 code = GET_CODE (p);
3448 if (code == CODE_LABEL || code == JUMP_INSN)
3449 return 1;
3450 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3451 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3452 && REGNO (SET_DEST (PATTERN (p))) == regno)
3454 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3455 return 0;
3459 #endif /* 0 */
3461 /* Look at all uses (not sets) of registers in X. For each, if it is
3462 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3463 a different insn, set USAGE[REGNO] to const0_rtx. */
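/* USAGE[REGNO] thus moves through a three-state lattice: 0 (no use
   seen yet) -> the using insn (every use so far is in that insn)
   -> const0_rtx (uses in at least two different insns).  */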
3465 static void
3466 find_single_use_in_loop (insn, x, usage)
3467 rtx insn;
3468 rtx x;
3469 varray_type usage;
3471 enum rtx_code code = GET_CODE (x);
3472 const char *fmt = GET_RTX_FORMAT (code);
3473 int i, j;
3475 if (code == REG)
3476 VARRAY_RTX (usage, REGNO (x))
3477 = (VARRAY_RTX (usage, REGNO (x)) != 0
3478 && VARRAY_RTX (usage, REGNO (x)) != insn)
3479 ? const0_rtx : insn;
3481 else if (code == SET)
3483 /* Don't count SET_DEST if it is a REG; otherwise count things
3484 in SET_DEST because if a register is partially modified, it won't
3485 show up as a potential movable so we don't care how USAGE is set
3486 for it. */
3487 if (GET_CODE (SET_DEST (x)) != REG)
3488 find_single_use_in_loop (insn, SET_DEST (x), usage);
3489 find_single_use_in_loop (insn, SET_SRC (x), usage);
3491 else
3492 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3494 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3495 find_single_use_in_loop (insn, XEXP (x, i), usage);
3496 else if (fmt[i] == 'E')
3497 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3498 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3502 /* Count and record any set in X which is contained in INSN. Update
3503 MAY_NOT_MOVE and LAST_SET for any register set in X. */
3505 static void
3506 count_one_set (insn, x, may_not_move, last_set)
3507 rtx insn, x;
3508 varray_type may_not_move;
3509 rtx *last_set;
3511 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3512 /* Don't move a reg that has an explicit clobber.
3513 It's not worth the pain to try to do it correctly. */
3514 VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
3516 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3518 rtx dest = SET_DEST (x);
3519 while (GET_CODE (dest) == SUBREG
3520 || GET_CODE (dest) == ZERO_EXTRACT
3521 || GET_CODE (dest) == SIGN_EXTRACT
3522 || GET_CODE (dest) == STRICT_LOW_PART)
3523 dest = XEXP (dest, 0);
3524 if (GET_CODE (dest) == REG)
3526 register int regno = REGNO (dest);
3527 /* If this is the first setting of this reg
3528 in current basic block, and it was set before,
3529 it must be set in two basic blocks, so it cannot
3530 be moved out of the loop. */
3531 if (VARRAY_INT (set_in_loop, regno) > 0
3532 && last_set[regno] == 0)
3533 VARRAY_CHAR (may_not_move, regno) = 1;
3534 /* If this is not first setting in current basic block,
3535 see if reg was used in between previous one and this.
3536 If so, neither one can be moved. */
3537 if (last_set[regno] != 0
3538 && reg_used_between_p (dest, last_set[regno], insn))
3539 VARRAY_CHAR (may_not_move, regno) = 1;
3540 if (VARRAY_INT (set_in_loop, regno) < 127)
3541 ++VARRAY_INT (set_in_loop, regno);
3542 last_set[regno] = insn;
3547 /* Increment SET_IN_LOOP at the index of each register
3548 that is modified by an insn between FROM and TO.
3549 If the value of an element of SET_IN_LOOP becomes 127 or more,
3550 stop incrementing it, to avoid overflow.
3552 Store in SINGLE_USAGE[I] the single insn in which register I is
3553 used, if it is only used once. Otherwise, it is set to 0 (for no
3554 uses) or const0_rtx for more than one use. This parameter may be zero,
3555 in which case this processing is not done.
3557 Store in *COUNT_PTR the number of actual instructions
3558 in the loop. We use this to decide what is worth moving out. */
3560 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
3561 In that case, it is the insn that last set reg n. */
3563 static void
3564 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3565 register rtx from, to;
3566 varray_type may_not_move;
3567 varray_type single_usage;
3568 int *count_ptr;
3569 int nregs;
3571 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3572 register rtx insn;
3573 register int count = 0;
3575 bzero ((char *) last_set, nregs * sizeof (rtx));
3576 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3578 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3580 ++count;
3582 /* Record registers that have exactly one use. */
3583 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3585 /* Include uses in REG_EQUAL notes. */
3586 if (REG_NOTES (insn))
3587 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3589 if (GET_CODE (PATTERN (insn)) == SET
3590 || GET_CODE (PATTERN (insn)) == CLOBBER)
3591 count_one_set (insn, PATTERN (insn), may_not_move, last_set);
3592 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3594 register int i;
3595 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3596 count_one_set (insn, XVECEXP (PATTERN (insn), 0, i),
3597 may_not_move, last_set);
3601 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3602 bzero ((char *) last_set, nregs * sizeof (rtx));
3604 *count_ptr = count;
3607 /* Given a loop that is bounded by LOOP_START and LOOP_END
3608 and that is entered at SCAN_START,
3609 return 1 if the register set in SET contained in insn INSN is used by
3610 any insn that precedes INSN in cyclic order starting
3611 from the loop entry point.
3613 We don't want to use INSN_LUID here because if we restrict INSN to those
3614 that have a valid INSN_LUID, it means we cannot move an invariant out
3615 from an inner loop past two loops. */
3617 static int
3618 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3619 rtx set, insn, loop_start, scan_start, loop_end;
3621 rtx reg = SET_DEST (set);
3622 rtx p;
3624 /* Scan forward checking for register usage. If we hit INSN, we
3625 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3626 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3628 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3629 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3630 return 1;
3632 if (p == loop_end)
3633 p = loop_start;
3636 return 0;
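/* An illustrative trace of the cyclic scan above, for a rotated loop
   entered at SCAN_START near its bottom:

       LOOP_START ... A ... B ... SCAN_START ... C ... LOOP_END

   Starting at SCAN_START we visit C, reach LOOP_END, wrap around to
   LOOP_START, then visit A and B.  If INSN is B and the register from
   SET overlaps anything in C or A, we return 1.  */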
3639 /* A "basic induction variable" or biv is a pseudo reg that is set
3640 (within this loop) only by incrementing or decrementing it. */
3641 /* A "general induction variable" or giv is a pseudo reg whose
3642 value is a linear function of a biv. */
3644 /* Bivs are recognized by `basic_induction_var';
3645 Givs by `general_induction_var'. */
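/* An illustrative example, assuming 4-byte array elements: in the loop

       for (i = 0; i < n; i++)
         a[i] = 0;

   i is a biv, set only by the increment i++, while the implicit address
   computation a + i * 4 is a giv of i (mult_val 4, add_val a).  Strength
   reduction replaces that computation with a pointer bumped by 4 on each
   iteration.  */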
3647 /* Indexed by register number, indicates whether or not register is an
3648 induction variable, and if so what type. */
3650 varray_type reg_iv_type;
3652 /* Indexed by register number, contains pointer to `struct induction'
3653 if register is an induction variable. This holds general info for
3654 all induction variables. */
3656 varray_type reg_iv_info;
3658 /* Indexed by register number, contains pointer to `struct iv_class'
3659 if register is a basic induction variable. This holds info describing
3660 the class (a related group) of induction variables that the biv belongs
3661 to. */
3663 struct iv_class **reg_biv_class;
3665 /* The head of a list which links together (via the next field)
3666 every iv class for the current loop. */
3668 struct iv_class *loop_iv_list;
3670 /* Givs made from biv increments are always splittable for loop unrolling.
3671 Since there is no regscan info for them, we have to keep track of them
3672 separately. */
3673 int first_increment_giv, last_increment_giv;
3675 /* Communication with routines called via `note_stores'. */
3677 static rtx note_insn;
3679 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3681 static rtx addr_placeholder;
3683 /* ??? Unfinished optimizations, and possible future optimizations,
3684 for the strength reduction code. */
3686 /* ??? The interaction of biv elimination, and recognition of 'constant'
3687 bivs, may cause problems. */
3689 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3690 performance problems.
3692 Perhaps don't eliminate things that can be combined with an addressing
3693 mode. Find all givs that have the same biv, mult_val, and add_val;
3694 then for each giv, check to see if its only use dies in a following
3695 memory address. If so, generate a new memory address and check to see
3696 if it is valid. If it is valid, then store the modified memory address,
3697 otherwise, mark the giv as not done so that it will get its own iv. */
3699 /* ??? Could try to optimize branches when it is known that a biv is always
3700 positive. */
3702 /* ??? When replacing a biv in a compare insn, we should replace it with the
3703 closest giv so that an optimized branch can still be recognized by the
3704 combiner, e.g. the VAX acb insn. */
3706 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3707 was rerun in loop_optimize whenever a register was added or moved.
3708 Also, some of the optimizations could be a little less conservative. */
3710 /* Perform strength reduction and induction variable elimination.
3712 Pseudo registers created during this function will be beyond the last
3713 valid index in several tables including n_times_set and regno_last_uid.
3714 This does not cause a problem here, because the added registers cannot be
3715 givs outside of their loop, and hence will never be reconsidered.
3716 But scan_loop must check regnos to make sure they are in bounds.
3718 SCAN_START is the first instruction in the loop, as the loop would
3719 actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
3720 the first instruction in the loop, as it is laid out in the
3721 instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG.
3722 LOOP_CONT is the NOTE_INSN_LOOP_CONT. */
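/* One possible layout, sketched under the assumption that the loop has
   been rotated so the exit test is at the bottom:

       LOOP_START   NOTE_INSN_LOOP_BEG
                    unconditional jump to the exit test
       LOOP_TOP     first insn of the body in layout order
                    ... body ...
       LOOP_CONT    NOTE_INSN_LOOP_CONT
       SCAN_START   exit test; the first insn as actually executed
                    conditional jump back to LOOP_TOP
       END          NOTE_INSN_LOOP_END

   For a loop entered at the top, SCAN_START instead follows LOOP_START
   directly and LOOP_TOP is zero.  */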
3724 static void
3725 strength_reduce (scan_start, end, loop_top, insn_count,
3726 loop_start, loop_end, loop_info, loop_cont, unroll_p, bct_p)
3727 rtx scan_start;
3728 rtx end;
3729 rtx loop_top;
3730 int insn_count;
3731 rtx loop_start;
3732 rtx loop_end;
3733 struct loop_info *loop_info;
3734 rtx loop_cont;
3735 int unroll_p, bct_p ATTRIBUTE_UNUSED;
3737 rtx p;
3738 rtx set;
3739 rtx inc_val;
3740 rtx mult_val;
3741 rtx dest_reg;
3742 rtx *location;
3743 /* This is 1 if current insn is not executed at least once for every loop
3744 iteration. */
3745 int not_every_iteration = 0;
3746 /* This is 1 if current insn may be executed more than once for every
3747 loop iteration. */
3748 int maybe_multiple = 0;
3749 /* This is 1 if we have passed a branch back to the top of the loop
3750 (aka a loop latch). */
3751 int past_loop_latch = 0;
3752 /* Temporary list pointers for traversing loop_iv_list. */
3753 struct iv_class *bl, **backbl;
3754 /* Ratio of extra register life span we can justify
3755 for saving an instruction. More if loop doesn't call subroutines
3756 since in that case saving an insn makes more difference
3757 and more registers are available. */
3758 /* ??? could set this to last value of threshold in move_movables */
3759 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3760 /* Map of pseudo-register replacements. */
3761 rtx *reg_map;
3762 int reg_map_size;
3763 int call_seen;
3764 rtx test;
3765 rtx end_insert_before;
3766 int loop_depth = 0;
3767 int n_extra_increment;
3768 int unrolled_insn_copies = 0;
3770 /* If scan_start points to the loop exit test, we have to be wary of
3771 subversive use of gotos inside expression statements. */
3772 if (prev_nonnote_insn (scan_start) != prev_nonnote_insn (loop_start))
3773 maybe_multiple = back_branch_in_range_p (scan_start, loop_start, loop_end);
3775 VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type");
3776 VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info");
3777 reg_biv_class = (struct iv_class **)
3778 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3779 bzero ((char *) reg_biv_class, (max_reg_before_loop
3780 * sizeof (struct iv_class *)));
3782 loop_iv_list = 0;
3783 addr_placeholder = gen_reg_rtx (Pmode);
3785 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3786 must be put before this insn, so that they will appear in the right
3787 order (i.e. loop order).
3789 If loop_end is the end of the current function, then emit a
3790 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3791 dummy note insn. */
3792 if (NEXT_INSN (loop_end) != 0)
3793 end_insert_before = NEXT_INSN (loop_end);
3794 else
3795 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3797 /* Scan through loop to find all possible bivs. */
3799 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
3800 p != NULL_RTX;
3801 p = next_insn_in_loop (p, scan_start, end, loop_top))
3803 if (GET_CODE (p) == INSN
3804 && (set = single_set (p))
3805 && GET_CODE (SET_DEST (set)) == REG)
3807 dest_reg = SET_DEST (set);
3808 if (REGNO (dest_reg) < max_reg_before_loop
3809 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3810 && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT)
3812 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3813 dest_reg, p, &inc_val, &mult_val,
3814 &location))
3816 /* It is a possible basic induction variable.
3817 Create and initialize an induction structure for it. */
3819 struct induction *v
3820 = (struct induction *) alloca (sizeof (struct induction));
3822 record_biv (v, p, dest_reg, inc_val, mult_val, location,
3823 not_every_iteration, maybe_multiple);
3824 REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT;
3826 else if (REGNO (dest_reg) < max_reg_before_loop)
3827 REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT;
3831 /* Past CODE_LABEL, we get to insns that may be executed multiple
3832 times. The only way we can be sure that they can't is if every
3833 jump insn between here and the end of the loop either
3834 returns, exits the loop, is a jump to a location that is still
3835 behind the label, or is a jump to the loop start. */
3837 if (GET_CODE (p) == CODE_LABEL)
3839 rtx insn = p;
3841 maybe_multiple = 0;
3843 while (1)
3845 insn = NEXT_INSN (insn);
3846 if (insn == scan_start)
3847 break;
3848 if (insn == end)
3850 if (loop_top != 0)
3851 insn = loop_top;
3852 else
3853 break;
3854 if (insn == scan_start)
3855 break;
3858 if (GET_CODE (insn) == JUMP_INSN
3859 && GET_CODE (PATTERN (insn)) != RETURN
3860 && (! condjump_p (insn)
3861 || (JUMP_LABEL (insn) != 0
3862 && JUMP_LABEL (insn) != scan_start
3863 && ! loop_insn_first_p (p, JUMP_LABEL (insn)))))
3865 maybe_multiple = 1;
3866 break;
3871 /* Past a jump, we get to insns for which we can't count
3872 on whether they will be executed during each iteration. */
3873 /* This code appears twice in strength_reduce. There is also similar
3874 code in scan_loop. */
3875 if (GET_CODE (p) == JUMP_INSN
3876 /* If we enter the loop in the middle, and scan around to the
3877 beginning, don't set not_every_iteration for that.
3878 This can be any kind of jump, since we want to know if insns
3879 will be executed if the loop is executed. */
3880 && ! (JUMP_LABEL (p) == loop_top
3881 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3882 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3884 rtx label = 0;
3886 /* If this is a jump outside the loop, then it also doesn't
3887 matter. Check to see if the target of this branch is on the
3888 loop_number_exit_labels list. */
3890 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3891 label;
3892 label = LABEL_NEXTREF (label))
3893 if (XEXP (label, 0) == JUMP_LABEL (p))
3894 break;
3896 if (! label)
3897 not_every_iteration = 1;
3900 else if (GET_CODE (p) == NOTE)
3902 /* At the virtual top of a converted loop, insns are again known to
3903 be executed each iteration: logically, the loop begins here
3904 even though the exit code has been duplicated.
3906 Insns are also again known to be executed each iteration at
3907 the LOOP_CONT note. */
3908 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3909 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3910 && loop_depth == 0)
3911 not_every_iteration = 0;
3912 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3913 loop_depth++;
3914 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3915 loop_depth--;
3918 /* Note if we pass a loop latch. If we do, then we can not clear
3919 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3920 a loop since a jump before the last CODE_LABEL may have started
3921 a new loop iteration.
3923 Note that LOOP_TOP is only set for rotated loops and we need
3924 this check for all loops, so compare against the CODE_LABEL
3925 which immediately follows LOOP_START. */
3926 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == NEXT_INSN (loop_start))
3927 past_loop_latch = 1;
3929 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3930 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3931 or not an insn is known to be executed each iteration of the
3932 loop, whether or not any iterations are known to occur.
3934 Therefore, if we have just passed a label and have no more labels
3935 between here and the test insn of the loop, and we have not passed
3936 a jump to the top of the loop, then we know these insns will be
3937 executed each iteration. */
3939 if (not_every_iteration
3940 && ! past_loop_latch
3941 && GET_CODE (p) == CODE_LABEL
3942 && no_labels_between_p (p, loop_end)
3943 && loop_insn_first_p (p, loop_cont))
3944 not_every_iteration = 0;
3947 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3948 Make a sanity check against n_times_set. */
3949 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3951 if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3952 /* Above happens if register modified by subreg, etc. */
3953 /* Make sure it is not recognized as a basic induction var: */
3954 || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
3955 /* If never incremented, it is invariant that we decided not to
3956 move. So leave it alone. */
3957 || ! bl->incremented)
3959 if (loop_dump_stream)
3960 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3961 bl->regno,
3962 (REG_IV_TYPE (bl->regno) != BASIC_INDUCT
3963 ? "not induction variable"
3964 : (! bl->incremented ? "never incremented"
3965 : "count error")));
3967 REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT;
3968 *backbl = bl->next;
3970 else
3972 backbl = &bl->next;
3974 if (loop_dump_stream)
3975 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3979 /* Exit if there are no bivs. */
3980 if (! loop_iv_list)
3982 /* Can still unroll the loop anyway, but indicate that there is no
3983 strength reduction info available. */
3984 if (unroll_p)
3985 unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
3986 loop_info, 0);
3988 goto egress;
3991 /* Find initial value for each biv by searching backwards from loop_start,
3992 halting at first label. Also record any test condition. */
3994 call_seen = 0;
3995 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3997 note_insn = p;
3999 if (GET_CODE (p) == CALL_INSN)
4000 call_seen = 1;
4002 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4003 || GET_CODE (p) == CALL_INSN)
4004 note_stores (PATTERN (p), record_initial, NULL);
4006 /* Record any test of a biv that branches around the loop if no store
4007 between it and the start of loop. We only care about tests with
4008 constants and registers and only certain of those. */
4009 if (GET_CODE (p) == JUMP_INSN
4010 && JUMP_LABEL (p) != 0
4011 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
4012 && (test = get_condition_for_loop (p)) != 0
4013 && GET_CODE (XEXP (test, 0)) == REG
4014 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4015 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
4016 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
4017 && bl->init_insn == 0)
4019 /* If an NE test, we have an initial value! */
4020 if (GET_CODE (test) == NE)
4022 bl->init_insn = p;
4023 bl->init_set = gen_rtx_SET (VOIDmode,
4024 XEXP (test, 0), XEXP (test, 1));
4026 else
4027 bl->initial_test = test;
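/* An illustrative example of the NE case above: for source like

       if (i != 0)
         goto past_loop;
       do
         body (i++);
       while (i < n);
     past_loop:

   the branch around the loop is taken unless i == 0, so on entry to the
   loop body i must be 0, and that becomes the biv's init_set.  Any other
   comparison is merely remembered as an initial_test.  */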
4031 /* Look at each biv and see if we can say anything better about its
4032 initial value from any initializing insns set up above. (This is done
4033 in two passes to avoid missing SETs in a PARALLEL.) */
4034 for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next)
4036 rtx src;
4037 rtx note;
4039 if (! bl->init_insn)
4040 continue;
4042 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4043 is a constant, use the value of that. */
4044 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4045 && CONSTANT_P (XEXP (note, 0)))
4046 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4047 && CONSTANT_P (XEXP (note, 0))))
4048 src = XEXP (note, 0);
4049 else
4050 src = SET_SRC (bl->init_set);
4052 if (loop_dump_stream)
4053 fprintf (loop_dump_stream,
4054 "Biv %d initialized at insn %d: initial value ",
4055 bl->regno, INSN_UID (bl->init_insn));
4057 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4058 || GET_MODE (src) == VOIDmode)
4059 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
4061 bl->initial_value = src;
4063 if (loop_dump_stream)
4065 if (GET_CODE (src) == CONST_INT)
4067 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
4068 fputc ('\n', loop_dump_stream);
4070 else
4072 print_rtl (loop_dump_stream, src);
4073 fprintf (loop_dump_stream, "\n");
4077 else
4079 struct iv_class *bl2 = 0;
4080 rtx increment = NULL_RTX;
4082 /* Biv initial value is not a simple move. If it is the sum of
4083 another biv and a constant, check if both bivs are incremented
4084 in lockstep. Then we are actually looking at a giv.
4085 For simplicity, we only handle the case where there is but a
4086 single increment, and the register is not used elsewhere. */
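/* An illustrative example of the transformation below: given

       b = a + 16;
     loop:
       a = a + 4;
       b = b + 4;

   a and b are incremented in lockstep, so b is really the giv a + 16 of
   the biv a.  The in-loop set of b is rewritten to b = a + 16, possibly
   moved ahead of the first use of b, and left for the generic giv code
   to rediscover.  */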
4087 if (bl->biv_count == 1
4088 && bl->regno < max_reg_before_loop
4089 && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4090 && GET_CODE (src) == PLUS
4091 && GET_CODE (XEXP (src, 0)) == REG
4092 && CONSTANT_P (XEXP (src, 1))
4093 && ((increment = biv_total_increment (bl, loop_start, loop_end))
4094 != NULL_RTX))
4096 int regno = REGNO (XEXP (src, 0));
4098 for (bl2 = loop_iv_list; bl2; bl2 = bl2->next)
4099 if (bl2->regno == regno)
4100 break;
4103 /* Now, can we transform this biv into a giv? */
4104 if (bl2
4105 && bl2->biv_count == 1
4106 && rtx_equal_p (increment,
4107 biv_total_increment (bl2, loop_start, loop_end))
4108 /* init_insn is only set to insns that are before loop_start
4109 without any intervening labels. */
4110 && ! reg_set_between_p (bl2->biv->src_reg,
4111 PREV_INSN (bl->init_insn), loop_start)
4112 /* The register from BL2 must be set before the register from
4113 BL is set, or we must be able to move the latter set after
4114 the former set. Currently there can't be any labels
4115 in-between when biv_total_increment returns nonzero both times
4116 but we test it here in case some day some real cfg analysis
4117 gets used to set always_computable. */
4118 && (loop_insn_first_p (bl2->biv->insn, bl->biv->insn)
4119 ? no_labels_between_p (bl2->biv->insn, bl->biv->insn)
4120 : (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn,
4121 bl2->biv->insn)
4122 && no_jumps_between_p (bl->biv->insn, bl2->biv->insn)))
4123 && validate_change (bl->biv->insn,
4124 &SET_SRC (single_set (bl->biv->insn)),
4125 copy_rtx (src), 0))
4127 int loop_num = uid_loop_num[INSN_UID (loop_start)];
4128 rtx dominator = loop_number_cont_dominator[loop_num];
4129 rtx giv = bl->biv->src_reg;
4130 rtx giv_insn = bl->biv->insn;
4131 rtx after_giv = NEXT_INSN (giv_insn);
4133 if (loop_dump_stream)
4134 fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno);
4135 /* Let this giv be discovered by the generic code. */
4136 REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT;
4137 reg_biv_class[bl->regno] = NULL_PTR;
4138 /* We can get better optimization if we can move the giv setting
4139 before the first giv use. */
4140 if (dominator
4141 && ! loop_insn_first_p (dominator, scan_start)
4142 && ! reg_set_between_p (bl2->biv->src_reg, loop_start,
4143 dominator)
4144 && ! reg_used_between_p (giv, loop_start, dominator)
4145 && ! reg_used_between_p (giv, giv_insn, loop_end))
4147 rtx p;
4148 rtx next;
4150 for (next = NEXT_INSN (dominator); ; next = NEXT_INSN (next))
4152 if ((GET_RTX_CLASS (GET_CODE (next)) == 'i'
4153 && (reg_mentioned_p (giv, PATTERN (next))
4154 || reg_set_p (bl2->biv->src_reg, next)))
4155 || GET_CODE (next) == JUMP_INSN)
4156 break;
4157 #ifdef HAVE_cc0
4158 if (GET_RTX_CLASS (GET_CODE (next)) != 'i'
4159 || ! sets_cc0_p (PATTERN (next)))
4160 #endif
4161 dominator = next;
4163 if (loop_dump_stream)
4164 fprintf (loop_dump_stream, "move after insn %d\n",
4165 INSN_UID (dominator));
4166 /* Avoid problems with luids by actually moving the insn
4167 and adjusting all luids in the range. */
4168 reorder_insns (giv_insn, giv_insn, dominator);
4169 for (p = dominator; INSN_UID (p) >= max_uid_for_loop; )
4170 p = PREV_INSN (p);
4171 compute_luids (giv_insn, after_giv, INSN_LUID (p));
4172 /* If the only purpose of the init insn is to initialize
4173 this giv, delete it. */
4174 if (single_set (bl->init_insn)
4175 && ! reg_used_between_p (giv, bl->init_insn, loop_start))
4176 delete_insn (bl->init_insn);
4178 else if (! loop_insn_first_p (bl2->biv->insn, bl->biv->insn))
4180 rtx p = PREV_INSN (giv_insn);
4181 while (INSN_UID (p) >= max_uid_for_loop)
4182 p = PREV_INSN (p);
4183 reorder_insns (giv_insn, giv_insn, bl2->biv->insn);
4184 compute_luids (after_giv, NEXT_INSN (giv_insn),
4185 INSN_LUID (p));
4187 /* Remove this biv from the chain. */
4188 if (bl->next)
4190 /* We move the following biv from *bl->next into *bl.
4191 We have to update reg_biv_class for that moved biv
4192 to point to its new address. */
4193 *bl = *bl->next;
4194 reg_biv_class[bl->regno] = bl;
4196 else
4198 *backbl = 0;
4199 break;
4203 /* If we can't make it a giv,
4204 let biv keep initial value of "itself". */
4205 else if (loop_dump_stream)
4206 fprintf (loop_dump_stream, "is complex\n");
4210 /* If a biv is unconditionally incremented several times in a row, convert
4211 all but the last increment into a giv. */
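/* An illustrative example of this conversion: a body containing

       i = i + 4;
       use (i);
       i = i + 4;

   is rewritten, with t a fresh register, as

       t = i + 4;
       use (t);
       i = i + 8;

   so that only the final increment still sets the biv i, while t is
   recorded as a giv of i.  */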
4213 /* Get an upper bound for the number of registers
4214 we might have after all bivs have been processed. */
4215 first_increment_giv = max_reg_num ();
4216 for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next)
4217 n_extra_increment += bl->biv_count - 1;
4219 /* If the loop contains volatile memory references, do not allow any
4220 replacements to take place, since this could lose the volatile markers. */
4221 if (n_extra_increment && ! loop_info->has_volatile)
4223 int nregs = first_increment_giv + n_extra_increment;
4225 /* Reallocate reg_iv_type and reg_iv_info. */
4226 VARRAY_GROW (reg_iv_type, nregs);
4227 VARRAY_GROW (reg_iv_info, nregs);
4229 for (bl = loop_iv_list; bl; bl = bl->next)
4231 struct induction **vp, *v, *next;
4232 int biv_dead_after_loop = 0;
4234 /* The biv increment lists are in reverse order. Fix this first. */
4235 for (v = bl->biv, bl->biv = 0; v; v = next)
4237 next = v->next_iv;
4238 v->next_iv = bl->biv;
4239 bl->biv = v;
4242 /* We must guard against the case that an early exit between v->insn
4243 and next->insn leaves the biv live after the loop, since that
4244 would mean that we'd be missing an increment for the final
4245 value. The following test to set biv_dead_after_loop is like
4246 the first part of the test to set bl->eliminable.
4247 We don't check here if we can calculate the final value, since
4248 this can't succeed if we already know that there is a jump
4249 between v->insn and next->insn, yet next->always_executed is
4250 set and next->maybe_multiple is cleared. Such a combination
4251 implies that the jump destination is outside the loop.
4252 If we want to make this check more sophisticated, we should
4253 check each branch between v->insn and next->insn individually
4254 to see if the biv is dead at its destination. */
4256 if (uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4257 && bl->init_insn
4258 && INSN_UID (bl->init_insn) < max_uid_for_loop
4259 && (uid_luid[REGNO_FIRST_UID (bl->regno)]
4260 >= INSN_LUID (bl->init_insn))
4261 #ifdef HAVE_decrement_and_branch_until_zero
4262 && ! bl->nonneg
4263 #endif
4264 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4265 biv_dead_after_loop = 1;
4267 for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;)
4269 HOST_WIDE_INT offset;
4270 rtx set, add_val, old_reg, dest_reg, last_use_insn, note;
4271 int old_regno, new_regno;
4273 if (! v->always_executed
4274 || v->maybe_multiple
4275 || GET_CODE (v->add_val) != CONST_INT
4276 || ! next->always_executed
4277 || next->maybe_multiple
4278 || ! CONSTANT_P (next->add_val)
4279 || v->mult_val != const1_rtx
4280 || next->mult_val != const1_rtx
4281 || ! (biv_dead_after_loop
4282 || no_jumps_between_p (v->insn, next->insn)))
4284 vp = &v->next_iv;
4285 continue;
4287 offset = INTVAL (v->add_val);
4288 set = single_set (v->insn);
4289 add_val = plus_constant (next->add_val, offset);
4290 old_reg = v->dest_reg;
4291 dest_reg = gen_reg_rtx (v->mode);
4293 /* Unlike reg_iv_type / reg_iv_info, the other four arrays
4294 have been allocated with some slop space, so we may not
4295 actually need to reallocate them. If we do, the following
4296 if statement will be executed just once in this loop. */
4297 if ((unsigned) max_reg_num () > n_times_set->num_elements)
4299 /* Grow all the remaining arrays. */
4300 VARRAY_GROW (set_in_loop, nregs);
4301 VARRAY_GROW (n_times_set, nregs);
4302 VARRAY_GROW (may_not_optimize, nregs);
4303 VARRAY_GROW (reg_single_usage, nregs);
4306 if (! validate_change (next->insn, next->location, add_val, 0))
4308 vp = &v->next_iv;
4309 continue;
4312 /* Here we can try to eliminate the increment by combining
4313 it into the uses. */
4315 /* Set last_use_insn so that we can check against it. */
4317 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4318 p != next->insn;
4319 p = next_insn_in_loop (p, scan_start, end, loop_top))
4321 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
4322 continue;
4323 if (reg_mentioned_p (old_reg, PATTERN (p)))
4325 last_use_insn = p;
4329 /* If we can't get the LUIDs for the insns, we can't
4330 calculate the lifetime. This is likely from unrolling
4331 of an inner loop, so there is little point in making this
4332 a DEST_REG giv anyway. */
4333 if (INSN_UID (v->insn) >= max_uid_for_loop
4334 || INSN_UID (last_use_insn) >= max_uid_for_loop
4335 || ! validate_change (v->insn, &SET_DEST (set), dest_reg, 0))
4337 /* Change the increment at NEXT back to what it was. */
4338 if (! validate_change (next->insn, next->location,
4339 next->add_val, 0))
4340 abort ();
4341 vp = &v->next_iv;
4342 continue;
4344 next->add_val = add_val;
4345 v->dest_reg = dest_reg;
4346 v->giv_type = DEST_REG;
4347 v->location = &SET_SRC (set);
4348 v->cant_derive = 0;
4349 v->combined_with = 0;
4350 v->maybe_dead = 0;
4351 v->derive_adjustment = 0;
4352 v->same = 0;
4353 v->ignore = 0;
4354 v->new_reg = 0;
4355 v->final_value = 0;
4356 v->same_insn = 0;
4357 v->auto_inc_opt = 0;
4358 v->unrolled = 0;
4359 v->shared = 0;
4360 v->derived_from = 0;
4361 v->always_computable = 1;
4362 v->always_executed = 1;
4363 v->replaceable = 1;
4364 v->no_const_addval = 0;
4366 old_regno = REGNO (old_reg);
4367 new_regno = REGNO (dest_reg);
4368 VARRAY_INT (set_in_loop, old_regno)--;
4369 VARRAY_INT (set_in_loop, new_regno) = 1;
4370 VARRAY_INT (n_times_set, old_regno)--;
4371 VARRAY_INT (n_times_set, new_regno) = 1;
4372 VARRAY_CHAR (may_not_optimize, new_regno) = 0;
4374 REG_IV_TYPE (new_regno) = GENERAL_INDUCT;
4375 REG_IV_INFO (new_regno) = v;
4377 /* If NEXT->insn has a REG_EQUAL note that mentions OLD_REG,
4378 it must be replaced. */
4379 note = find_reg_note (next->insn, REG_EQUAL, NULL_RTX);
4380 if (note && reg_mentioned_p (old_reg, XEXP (note, 0)))
4381 XEXP (note, 0) = copy_rtx (SET_SRC (single_set (next->insn)));
4383 /* Remove the increment from the list of biv increments,
4384 and record it as a giv. */
4385 *vp = next;
4386 bl->biv_count--;
4387 v->next_iv = bl->giv;
4388 bl->giv = v;
4389 bl->giv_count++;
4390 v->benefit = rtx_cost (SET_SRC (set), SET);
4391 bl->total_benefit += v->benefit;
4393 /* Now replace the biv with DEST_REG in all insns between
4394 the replaced increment and the next increment, and
4395 remember the last insn that needed a replacement. */
4396 for (last_use_insn = v->insn, p = NEXT_INSN (v->insn);
4397 p != next->insn;
4398 p = next_insn_in_loop (p, scan_start, end, loop_top))
4400 rtx note;
4402 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
4403 continue;
4404 if (reg_mentioned_p (old_reg, PATTERN (p)))
4406 last_use_insn = p;
4407 if (! validate_replace_rtx (old_reg, dest_reg, p))
4408 abort ();
4410 for (note = REG_NOTES (p); note; note = XEXP (note, 1))
4412 if (GET_CODE (note) == EXPR_LIST)
4413 XEXP (note, 0)
4414 = replace_rtx (XEXP (note, 0), old_reg, dest_reg);
4418 v->last_use = last_use_insn;
4419 v->lifetime = INSN_LUID (v->insn) - INSN_LUID (last_use_insn);
4420 /* If the lifetime is zero, it means that this register is really
4421 a dead store. So mark this as a giv that can be ignored.
4422 This will not prevent the biv from being eliminated. */
4423 if (v->lifetime == 0)
4424 v->ignore = 1;
4426 if (loop_dump_stream)
4427 fprintf (loop_dump_stream,
4428 "Increment %d of biv %d converted to giv %d.\n\n",
4429 INSN_UID (v->insn), old_regno, new_regno);
4433 last_increment_giv = max_reg_num () - 1;
4435 /* Search the loop for general induction variables. */
4437 /* A register is a giv if: it is only set once, it is a function of a
4438 biv and a constant (or invariant), and it is not a biv. */
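/* An illustrative example: with i a biv, an insn computing

       g = i * 4 + 20;

   makes g a giv with src_reg i, mult_val 4 and add_val 20, provided g is
   set nowhere else in the loop (or all its sets are consecutive and
   together compute a giv) and g is not itself a biv.  */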
4440 not_every_iteration = 0;
4441 loop_depth = 0;
4442 maybe_multiple = 0;
4443 p = scan_start;
4444 while (1)
4446 p = NEXT_INSN (p);
4447 /* At end of a straight-in loop, we are done.
4448 At end of a loop entered at the bottom, scan the top. */
4449 if (p == scan_start)
4450 break;
4451 if (p == end)
4453 if (loop_top != 0)
4454 p = loop_top;
4455 else
4456 break;
4457 if (p == scan_start)
4458 break;
4461 /* Look for a general induction variable in a register. */
4462 if (GET_CODE (p) == INSN
4463 && (set = single_set (p))
4464 && GET_CODE (SET_DEST (set)) == REG
4465 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
4467 rtx src_reg;
4468 rtx add_val;
4469 rtx mult_val;
4470 int benefit;
4471 rtx regnote = 0;
4472 rtx last_consec_insn;
4474 dest_reg = SET_DEST (set);
4475 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4476 continue;
4478 if (/* SET_SRC is a giv. */
4479 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
4480 &mult_val, 0, &benefit)
4481 /* Equivalent expression is a giv. */
4482 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4483 && general_induction_var (XEXP (regnote, 0), &src_reg,
4484 &add_val, &mult_val, 0,
4485 &benefit)))
4486 /* Don't try to handle any regs made by loop optimization.
4487 We have nothing on them in regno_first_uid, etc. */
4488 && REGNO (dest_reg) < max_reg_before_loop
4489 /* Don't recognize a BASIC_INDUCT_VAR here. */
4490 && dest_reg != src_reg
4491 /* This must be the only place where the register is set. */
4492 && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
4493 /* or all sets must be consecutive and make a giv. */
4494 || (benefit = consec_sets_giv (benefit, p,
4495 src_reg, dest_reg,
4496 &add_val, &mult_val,
4497 &last_consec_insn))))
4499 struct induction *v
4500 = (struct induction *) alloca (sizeof (struct induction));
4502 /* If this is a library call, increase benefit. */
4503 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4504 benefit += libcall_benefit (p);
4506 /* Skip the consecutive insns, if there are any. */
4507 if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1)
4508 p = last_consec_insn;
4510 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
4511 DEST_REG, not_every_iteration, maybe_multiple,
4512 NULL_PTR, loop_start, loop_end);
4517 #ifndef DONT_REDUCE_ADDR
4518 /* Look for givs which are memory addresses. */
4519 /* This resulted in worse code on a VAX 8600. I wonder if it
4520 still does. */
4521 if (GET_CODE (p) == INSN)
4522 find_mem_givs (PATTERN (p), p, not_every_iteration, maybe_multiple,
4523 loop_start, loop_end);
4524 #endif
4526 /* Update the status of whether giv can derive other givs. This can
4527 change when we pass a label or an insn that updates a biv. */
4528 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4529 || GET_CODE (p) == CODE_LABEL)
4530 update_giv_derive (p);
4532 /* Past CODE_LABEL, we get to insns that may be executed multiple
4533 times. The only way we can be sure that they can't is if every
4534 jump insn between here and the end of the loop either
4535 returns, exits the loop, is a forward jump, or is a jump
4536 to the loop start. */
4538 if (GET_CODE (p) == CODE_LABEL)
4540 rtx insn = p;
4542 maybe_multiple = 0;
4544 while (1)
4546 insn = NEXT_INSN (insn);
4547 if (insn == scan_start)
4548 break;
4549 if (insn == end)
4551 if (loop_top != 0)
4552 insn = loop_top;
4553 else
4554 break;
4555 if (insn == scan_start)
4556 break;
4559 if (GET_CODE (insn) == JUMP_INSN
4560 && GET_CODE (PATTERN (insn)) != RETURN
4561 && (! condjump_p (insn)
4562 || (JUMP_LABEL (insn) != 0
4563 && JUMP_LABEL (insn) != scan_start
4564 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
4565 || INSN_UID (insn) >= max_uid_for_loop
4566 || (INSN_LUID (JUMP_LABEL (insn))
4567 < INSN_LUID (insn))))))
4569 maybe_multiple = 1;
4570 break;
4575 /* Past a jump, we get to insns for which we can't count
4576 on whether they will be executed during each iteration. */
4577 /* This code appears twice in strength_reduce. There is also similar
4578 code in scan_loop. */
4579 if (GET_CODE (p) == JUMP_INSN
4580 /* If we enter the loop in the middle, and scan around to the
4581 beginning, don't set not_every_iteration for that.
4582 This can be any kind of jump, since we want to know if insns
4583 will be executed if the loop is executed. */
4584 && ! (JUMP_LABEL (p) == loop_top
4585 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
4586 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
4588 rtx label = 0;
4590 /* If this is a jump outside the loop, then it also doesn't
4591 matter. Check to see if the target of this branch is on the
4592 loop_number_exit_labels list. */
4594 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
4595 label;
4596 label = LABEL_NEXTREF (label))
4597 if (XEXP (label, 0) == JUMP_LABEL (p))
4598 break;
4600 if (! label)
4601 not_every_iteration = 1;
4604 else if (GET_CODE (p) == NOTE)
4606 /* At the virtual top of a converted loop, insns are again known to
4607 be executed each iteration: logically, the loop begins here
4608 even though the exit code has been duplicated.
4610 Insns are also again known to be executed each iteration at
4611 the LOOP_CONT note. */
4612 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4613 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4614 && loop_depth == 0)
4615 not_every_iteration = 0;
4616 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4617 loop_depth++;
4618 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4619 loop_depth--;
4622 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4623 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4624 or not an insn is known to be executed each iteration of the
4625 loop, whether or not any iterations are known to occur.
4627 Therefore, if we have just passed a label and have no more labels
4628 between here and the test insn of the loop, we know these insns
4629 will be executed each iteration. */
4631 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
4632 && no_labels_between_p (p, loop_end)
4633 && loop_insn_first_p (p, loop_cont))
4634 not_every_iteration = 0;
4637 /* Try to calculate and save the number of loop iterations. This is
4638 set to zero if the actual number can not be calculated. This must
4639 be called after all giv's have been identified, since otherwise it may
4640 fail if the iteration variable is a giv. */
4642 loop_iterations (loop_start, loop_end, loop_info);
4644 /* Now for each giv for which we still don't know whether or not it is
4645 replaceable, check to see if it is replaceable because its final value
4646 can be calculated. This must be done after loop_iterations is called,
4647 so that final_giv_value will work correctly. */
4649 for (bl = loop_iv_list; bl; bl = bl->next)
4651 struct induction *v;
4653 for (v = bl->giv; v; v = v->next_iv)
4654 if (! v->replaceable && ! v->not_replaceable)
4655 check_final_value (v, loop_start, loop_end, loop_info->n_iterations);
4658 /* Try to prove that the loop counter variable (if any) is always
4659 nonnegative; if so, record that fact with a REG_NONNEG note
4660 so that "decrement and branch until zero" insn can be used. */
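/* An illustrative example of what this enables: a counting loop like

       for (i = 0; i < 100; i++)
         body ();

   whose biv is used only for counting may be reversed into the
   equivalent of

       for (i = 99; i >= 0; i--)
         body ();

   so the endtest can be a single decrement-and-branch-until-zero insn;
   the REG_NONNEG note records that i is never negative there.  */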
4661 check_dbra_loop (loop_end, insn_count, loop_start, loop_info);
4663 /* Create reg_map to hold substitutions for replaceable giv regs.
4664 Some givs might have been made from biv increments, so look at
4665 reg_iv_type for a suitable size. */
4666 reg_map_size = reg_iv_type->num_elements;
4667 reg_map = (rtx *) alloca (reg_map_size * sizeof (rtx));
4668 bzero ((char *) reg_map, reg_map_size * sizeof (rtx));
4670 /* Examine each iv class for feasibility of strength reduction/induction
4671 variable elimination. */
4673 for (bl = loop_iv_list; bl; bl = bl->next)
4675 struct induction *v;
4676 int benefit;
4677 int all_reduced;
4678 rtx final_value = 0;
4679 unsigned int nregs;
4681 /* Test whether it will be possible to eliminate this biv
4682 provided all givs are reduced. This is possible if either
4683 the reg is not used outside the loop, or we can compute
4684 what its final value will be.
4686 For architectures with a decrement_and_branch_until_zero insn,
4687 don't do this if we put a REG_NONNEG note on the endtest for
4688 this biv. */
4690 /* Compare against bl->init_insn rather than loop_start.
4691 We aren't concerned with any uses of the biv between
4692 init_insn and loop_start since these won't be affected
4693 by the value of the biv elsewhere in the function, so
4694 long as init_insn doesn't use the biv itself.
4695 March 14, 1989 -- self@bayes.arc.nasa.gov */
4697 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4698 && bl->init_insn
4699 && INSN_UID (bl->init_insn) < max_uid_for_loop
4700 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4701 #ifdef HAVE_decrement_and_branch_until_zero
4702 && ! bl->nonneg
4703 #endif
4704 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4705 || ((final_value = final_biv_value (bl, loop_start, loop_end,
4706 loop_info->n_iterations))
4707 #ifdef HAVE_decrement_and_branch_until_zero
4708 && ! bl->nonneg
4709 #endif
4711 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
4712 threshold, insn_count);
4713 else
4715 if (loop_dump_stream)
4717 fprintf (loop_dump_stream,
4718 "Cannot eliminate biv %d.\n",
4719 bl->regno);
4720 fprintf (loop_dump_stream,
4721 "First use: insn %d, last use: insn %d.\n",
4722 REGNO_FIRST_UID (bl->regno),
4723 REGNO_LAST_UID (bl->regno));
4727 /* Combine all giv's for this iv_class. */
4728 combine_givs (bl);
4730 /* This will be true at the end, if all givs which depend on this
4731 biv have been strength reduced.
4732 We can't (currently) eliminate the biv unless this is so. */
4733 all_reduced = 1;
4735 /* Check each giv in this class to see if we will benefit by reducing
4736 it. Skip giv's combined with others. */
4737 for (v = bl->giv; v; v = v->next_iv)
4739 struct induction *tv;
4741 if (v->ignore || v->same)
4742 continue;
4744 benefit = v->benefit;
4746 /* Reduce benefit if not replaceable, since we will insert
4747 a move-insn to replace the insn that calculates this giv.
4748 Don't do this unless the giv is a user variable, since it
4749 will often be marked non-replaceable because of the duplication
4750 of the exit code outside the loop. In such a case, the copies
4751 we insert are dead and will be deleted. So they don't have
4752 a cost. Similar situations exist. */
4753 /* ??? The new final_[bg]iv_value code does a much better job
4754 of finding replaceable giv's, and hence this code may no longer
4755 be necessary. */
4756 if (! v->replaceable && ! bl->eliminable
4757 && REG_USERVAR_P (v->dest_reg))
4758 benefit -= copy_cost;
4760 /* Decrease the benefit to count the add-insns that we will
4761 insert to increment the reduced reg for the giv. */
4762 benefit -= add_cost * bl->biv_count;
4764 /* Decide whether to strength-reduce this giv or to leave the code
4765 unchanged (recompute it from the biv each time it is used).
4766 This decision can be made independently for each giv. */
4768 #ifdef AUTO_INC_DEC
4769 /* Attempt to guess whether autoincrement will handle some of the
4770 new add insns; if so, increase BENEFIT (undo the subtraction of
4771 add_cost that was done above). */
4772 if (v->giv_type == DEST_ADDR
4773 && GET_CODE (v->mult_val) == CONST_INT)
4775 if (HAVE_POST_INCREMENT
4776 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4777 benefit += add_cost * bl->biv_count;
4778 else if (HAVE_PRE_INCREMENT
4779 && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4780 benefit += add_cost * bl->biv_count;
4781 else if (HAVE_POST_DECREMENT
4782 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4783 benefit += add_cost * bl->biv_count;
4784 else if (HAVE_PRE_DECREMENT
4785 && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4786 benefit += add_cost * bl->biv_count;
4788 #endif
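/* An illustrative example of the adjustment above, assuming a target
   with post-increment addressing: for a DEST_ADDR giv stepping by the
   mode size, the pair

       ... = *p;
       p = p + 4;

   can later be fused by flow into a single *p++ reference, so the add
   insns charged against the giv just above would vanish; their cost is
   therefore credited back to BENEFIT.  */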
4790 /* If an insn is not to be strength reduced, then set its ignore
4791 flag, and clear all_reduced. */
4793 /* A giv that depends on a reversed biv must be reduced if it is
4794 used after the loop exit, otherwise, it would have the wrong
4795 value after the loop exit. To make it simple, just reduce all
4796 of such giv's whether or not we know they are used after the loop
4797 exit. */
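/* A worked instance of the test below, with assumed numbers: in a loop
   with no calls and 13 non-fixed registers, threshold is
   2 * (3 + 13) = 32.  A giv with lifetime 2 and benefit 4 scores
   2 * 32 * 4 = 256; in a 300-insn loop 256 < 300, so that giv would be
   ignored, unless flag_reduce_all_givs is set or the biv has been
   reversed.  */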
4799 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4800 && ! bl->reversed )
4802 if (loop_dump_stream)
4803 fprintf (loop_dump_stream,
4804 "giv of insn %d not worth while, %d vs %d.\n",
4805 INSN_UID (v->insn),
4806 v->lifetime * threshold * benefit, insn_count);
4807 v->ignore = 1;
4808 all_reduced = 0;
4810 else
4812 /* Check that we can increment the reduced giv without a
4813 multiply insn. If not, reject it. */
4815 for (tv = bl->biv; tv; tv = tv->next_iv)
4816 if (tv->mult_val == const1_rtx
4817 && ! product_cheap_p (tv->add_val, v->mult_val))
4819 if (loop_dump_stream)
4820 fprintf (loop_dump_stream,
4821 "giv of insn %d: would need a multiply.\n",
4822 INSN_UID (v->insn));
4823 v->ignore = 1;
4824 all_reduced = 0;
4825 break;
4830 /* Check for givs whose first use is their definition and whose
4831 last use is the definition of another giv. If so, it is likely
4832 dead and should not be used to derive another giv nor to
4833 eliminate a biv. */
4834 for (v = bl->giv; v; v = v->next_iv)
4836 if (v->ignore
4837 || (v->same && v->same->ignore))
4838 continue;
4840 if (v->last_use)
4842 struct induction *v1;
4844 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4845 if (v->last_use == v1->insn)
4846 v->maybe_dead = 1;
4848 else if (v->giv_type == DEST_REG
4849 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4851 struct induction *v1;
4853 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4854 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4855 v->maybe_dead = 1;
4859 /* Now that we know which givs will be reduced, try to rearrange the
4860 combinations to reduce register pressure.
4861 recombine_givs calls find_life_end, which needs reg_iv_type and
4862 reg_iv_info to be valid for all pseudos. We do the necessary
4863 reallocation here since it allows to check if there are still
4864 more bivs to process. */
4865 nregs = max_reg_num ();
4866 if (nregs > reg_iv_type->num_elements)
4868 /* If there are still more bivs to process, allocate some slack
4869 space so that we're not constantly reallocating these arrays. */
4870 if (bl->next)
4871 nregs += nregs / 4;
4872 /* Reallocate reg_iv_type and reg_iv_info. */
4873 VARRAY_GROW (reg_iv_type, nregs);
4874 VARRAY_GROW (reg_iv_info, nregs);
4876 recombine_givs (bl, loop_start, loop_end, unroll_p);
4878 /* Reduce each giv that we decided to reduce. */
4880 for (v = bl->giv; v; v = v->next_iv)
4882 struct induction *tv;
4883 if (! v->ignore && v->same == 0)
4885 int auto_inc_opt = 0;
4887 /* If the code for derived givs immediately below has already
4888 allocated a new_reg, we must keep it. */
4889 if (! v->new_reg)
4890 v->new_reg = gen_reg_rtx (v->mode);
4892 if (v->derived_from)
4894 struct induction *d = v->derived_from;
4896 /* In case d->dest_reg is not replaceable, we have
4897 to replace it in v->insn now. */
4898 if (! d->new_reg)
4899 d->new_reg = gen_reg_rtx (d->mode);
4900 PATTERN (v->insn)
4901 = replace_rtx (PATTERN (v->insn), d->dest_reg, d->new_reg);
4902 PATTERN (v->insn)
4903 = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg);
4904 /* For each place where the biv is incremented, add an
4905 insn to set the new, reduced reg for the giv.
4906 We used to do this only for biv_count != 1, but
4907 this fails when there is a giv after a single biv
4908 increment, e.g. when the last giv was expressed as
4909 pre-decrement. */
4910 for (tv = bl->biv; tv; tv = tv->next_iv)
4912 /* We always emit reduced giv increments before the
4913 biv increment when bl->biv_count != 1. So by
4914 emitting the add insns for derived givs after the
4915 biv increment, they pick up the updated value of
4916 the reduced giv.
4917 If the reduced giv is processed with
4918 auto_inc_opt == 1, then it is incremented earlier
4919 than the biv, hence we'll still pick up the right
4920 value.
4921 If it's processed with auto_inc_opt == -1,
4922 that implies that the biv increment is before the
4923 first reduced giv's use. The derived giv's lifetime
4924 is after the reduced giv's lifetime, hence in this
4925 case, the biv increment doesn't matter. */
4926 emit_insn_after (copy_rtx (PATTERN (v->insn)), tv->insn);
4928 continue;
4931 #ifdef AUTO_INC_DEC
4932 /* If the target has auto-increment addressing modes, and
4933 this is an address giv, then try to put the increment
4934 immediately after its use, so that flow can create an
4935 auto-increment addressing mode. */
4936 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4937 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4938 /* We don't handle reversed biv's because bl->biv->insn
4939 does not have a valid INSN_LUID. */
4940 && ! bl->reversed
4941 && v->always_executed && ! v->maybe_multiple
4942 && INSN_UID (v->insn) < max_uid_for_loop)
4944 /* If other giv's have been combined with this one, then
4945 this will work only if all uses of the other giv's occur
4946 before this giv's insn. This is difficult to check.
4948 We simplify this by looking for the common case where
4949 there is one DEST_REG giv, and this giv's insn is the
4950 last use of the dest_reg of that DEST_REG giv. If the
4951 increment occurs after the address giv, then we can
4952 perform the optimization. (Otherwise, the increment
4953 would have to go before other_giv, and we would not be
4954 able to combine it with the address giv to get an
4955 auto-inc address.) */
4956 if (v->combined_with)
4958 struct induction *other_giv = 0;
4960 for (tv = bl->giv; tv; tv = tv->next_iv)
4961 if (tv->same == v)
4963 if (other_giv)
4964 break;
4965 else
4966 other_giv = tv;
4968 if (! tv && other_giv
4969 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4970 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4971 == INSN_UID (v->insn))
4972 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4973 auto_inc_opt = 1;
4975 /* Check for case where increment is before the address
4976 giv. Do this test in "loop order". */
4977 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4978 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4979 || (INSN_LUID (bl->biv->insn)
4980 > INSN_LUID (scan_start))))
4981 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4982 && (INSN_LUID (scan_start)
4983 < INSN_LUID (bl->biv->insn))))
4984 auto_inc_opt = -1;
4985 else
4986 auto_inc_opt = 1;
4988 #ifdef HAVE_cc0
4990 rtx prev;
4992 /* We can't put an insn immediately after one setting
4993 cc0, or immediately before one using cc0. */
4994 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4995 || (auto_inc_opt == -1
4996 && (prev = prev_nonnote_insn (v->insn)) != 0
4997 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4998 && sets_cc0_p (PATTERN (prev))))
4999 auto_inc_opt = 0;
5001 #endif
5003 if (auto_inc_opt)
5004 v->auto_inc_opt = 1;
5006 #endif
5008 /* For each place where the biv is incremented, add an insn
5009 to increment the new, reduced reg for the giv. */
5010 for (tv = bl->biv; tv; tv = tv->next_iv)
5012 rtx insert_before;
5014 if (! auto_inc_opt)
5015 insert_before = tv->insn;
5016 else if (auto_inc_opt == 1)
5017 insert_before = NEXT_INSN (v->insn);
5018 else
5019 insert_before = v->insn;
5021 if (tv->mult_val == const1_rtx)
5022 emit_iv_add_mult (tv->add_val, v->mult_val,
5023 v->new_reg, v->new_reg, insert_before);
5024 else /* tv->mult_val == const0_rtx */
5025 /* A multiply is acceptable here
5026 since this is presumed to be seldom executed. */
5027 emit_iv_add_mult (tv->add_val, v->mult_val,
5028 v->add_val, v->new_reg, insert_before);
5031 /* Add code at loop start to initialize giv's reduced reg. */
5033 emit_iv_add_mult (bl->initial_value, v->mult_val,
5034 v->add_val, v->new_reg, loop_start);
5038 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5039 as not reduced.
5041 For each giv register that can be reduced now: if replaceable,
5042 substitute reduced reg wherever the old giv occurs;
5043 else add new move insn "giv_reg = reduced_reg". */
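/* An illustrative example of the two cases below, writing t for the
   reduced register v->new_reg: if giv g is replaceable, every in-loop
   reference to g is later rewritten to t through REG_MAP; otherwise the
   insn computing g is followed by a move

       g = t;

   so that uses we cannot rewrite (e.g. in duplicated exit code) still
   see the correct value.  */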
5045 for (v = bl->giv; v; v = v->next_iv)
5047 if (v->same && v->same->ignore)
5048 v->ignore = 1;
5050 if (v->ignore)
5051 continue;
5053 /* Update expression if this was combined, in case other giv was
5054 replaced. */
5055 if (v->same)
5056 v->new_reg = replace_rtx (v->new_reg,
5057 v->same->dest_reg, v->same->new_reg);
5059 if (v->giv_type == DEST_ADDR)
5060 /* Store reduced reg as the address in the memref where we found
5061 this giv. */
5062 validate_change (v->insn, v->location, v->new_reg, 0);
5063 else if (v->replaceable)
5065 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5067 #if 0
5068 /* I can no longer duplicate the original problem. Perhaps
5069 this is unnecessary now? */
5071 /* Replaceable; it isn't strictly necessary to delete the old
5072 insn and emit a new one, because v->dest_reg is now dead.
5074 However, especially when unrolling loops, the special
5075 handling for (set REG0 REG1) in the second cse pass may
5076 make v->dest_reg live again. To avoid this problem, emit
5077 an insn to set the original giv reg from the reduced giv.
5078 We can not delete the original insn, since it may be part
5079 of a LIBCALL, and the code in flow that eliminates dead
5080 libcalls will fail if it is deleted. */
5081 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
5082 v->insn);
5083 #endif
5085 else
5087 /* Not replaceable; emit an insn to set the original giv reg from
5088 the reduced giv, same as above. */
5089 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
5090 v->insn);
5093 /* When a loop is reversed, givs which depend on the reversed
5094 biv, and which are live outside the loop, must be set to their
5095 correct final value. This insn is only needed if the giv is
5096 not replaceable. The correct final value is the same as the
5097 value that the giv starts the reversed loop with. */
5098 if (bl->reversed && ! v->replaceable)
5099 emit_iv_add_mult (bl->initial_value, v->mult_val,
5100 v->add_val, v->dest_reg, end_insert_before);
5101 else if (v->final_value)
5103 rtx insert_before;
5105 /* If the loop has multiple exits, emit the insn before the
5106 loop to ensure that it will always be executed no matter
5107 how the loop exits. Otherwise, emit the insn after the loop,
5108 since this is slightly more efficient. */
5109 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
5110 insert_before = loop_start;
5111 else
5112 insert_before = end_insert_before;
5113 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
5114 insert_before);
5116 #if 0
5117 /* If the insn to set the final value of the giv was emitted
5118 before the loop, then we must delete the insn inside the loop
5119 that sets it. If this is a LIBCALL, then we must delete
5120 every insn in the libcall. Note, however, that
5121 final_giv_value will only succeed when there are multiple
5122 exits if the giv is dead at each exit, hence it does not
5123 matter that the original insn remains because it is dead
5124 anyway. */
5125 /* Delete the insn inside the loop that sets the giv since
5126 the giv is now set before (or after) the loop. */
5127 delete_insn (v->insn);
5128 #endif
5131 if (loop_dump_stream)
5133 fprintf (loop_dump_stream, "giv at %d reduced to ",
5134 INSN_UID (v->insn));
5135 print_rtl (loop_dump_stream, v->new_reg);
5136 fprintf (loop_dump_stream, "\n");
5140 /* All the givs based on the biv bl have been reduced if they
5141 merit it. */
5143 /* For each giv not marked as maybe dead that has been combined with a
5144 second giv, clear any "maybe dead" mark on that second giv.
5145 v->new_reg will either be or refer to the register of the giv it
5146 combined with.
5148 Doing this clearing avoids problems in biv elimination where a
5149 giv's new_reg is a complex value that can't be put in the insn but
5150 the giv combined with (with a reg as new_reg) is marked maybe_dead.
5151 Since the register will be used in either case, we'd prefer it be
5152 used from the simpler giv. */
5154 for (v = bl->giv; v; v = v->next_iv)
5155 if (! v->maybe_dead && v->same)
5156 v->same->maybe_dead = 0;
5158 /* Try to eliminate the biv, if it is a candidate.
5159 This won't work if ! all_reduced,
5160 since the givs we planned to use might not have been reduced.
5162 We have to be careful that we didn't initially think we could eliminate
5163 this biv because of a giv that we now think may be dead and shouldn't
5164 be used as a biv replacement.
5166 Also, there is the possibility that we may have a giv that looks
5167 like it can be used to eliminate a biv, but the resulting insn
5168 isn't valid. This can happen, for example, on the 88k, where a
5169 JUMP_INSN can compare a register only with zero. Attempts to
5170 replace it with a compare with a constant will fail.
5172 Note that in cases where this call fails, we may have replaced some
5173 of the occurrences of the biv with a giv, but no harm was done in
5174 doing so in the rare cases where it can occur. */
5176 if (all_reduced == 1 && bl->eliminable
5177 && maybe_eliminate_biv (bl, loop_start, end, 1,
5178 threshold, insn_count))
5181 /* ?? If we created a new test to bypass the loop entirely,
5182 or otherwise drop straight in, based on this test, then
5183 we might want to rewrite it also. This way some later
5184 pass has more hope of removing the initialization of this
5185 biv entirely. */
5187 /* If final_value != 0, then the biv may be used after loop end
5188 and we must emit an insn to set it just in case.
5190 Reversed bivs already have an insn after the loop setting their
5191 value, so we don't need another one. We can't calculate the
5192 proper final value for such a biv here anyway. */
5193 if (final_value != 0 && ! bl->reversed)
5195 rtx insert_before;
5197 /* If the loop has multiple exits, emit the insn before the
5198 loop to ensure that it will always be executed no matter
5199 how the loop exits. Otherwise, emit the insn after the
5200 loop, since this is slightly more efficient. */
5201 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
5202 insert_before = loop_start;
5203 else
5204 insert_before = end_insert_before;
5206 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
5207 insert_before);
5210 #if 0
5211 /* Delete all of the instructions inside the loop which set
5212 the biv, as they are all dead. It is safe to delete them,
5213 because an insn setting a biv will never be part of a libcall. */
5214 /* However, deleting them will invalidate the regno_last_uid info,
5215 so keeping them around is more convenient. Final_biv_value
5216 will only succeed when there are multiple exits if the biv
5217 is dead at each exit, hence it does not matter that the original
5218 insn remains, because it is dead anyway. */
5219 for (v = bl->biv; v; v = v->next_iv)
5220 delete_insn (v->insn);
5221 #endif
5223 if (loop_dump_stream)
5224 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5225 bl->regno);
5229 /* Go through all the instructions in the loop, making all the
5230 register substitutions scheduled in REG_MAP. */
5232 for (p = loop_start; p != end; p = NEXT_INSN (p))
5233 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5234 || GET_CODE (p) == CALL_INSN)
5236 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5237 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5238 INSN_CODE (p) = -1;
5241 if (loop_info->n_iterations > 0)
5243 /* When we completely unroll a loop we will likely not need the increment
5244 of the loop BIV and we will not need the conditional branch at the
5245 end of the loop. */
5246 unrolled_insn_copies = insn_count - 2;
5248 #ifdef HAVE_cc0
5249 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5250 need the comparison before the conditional branch at the end of the
5251 loop. */
5252 unrolled_insn_copies -= 1;
5253 #endif
5255 /* We'll need one copy for each loop iteration. */
5256 unrolled_insn_copies *= loop_info->n_iterations;
5258 /* A little slop to account for the ability to remove initialization
5259 code, better CSE, and other secondary benefits of completely
5260 unrolling some loops. */
5261 unrolled_insn_copies -= 1;
5263 /* Clamp the value. */
5264 if (unrolled_insn_copies < 0)
5265 unrolled_insn_copies = 0;
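/* A worked example of this estimate (illustrative numbers): a loop
   body of 10 insns that runs 4 iterations on a machine without cc0
   gives (10 - 2) * 4 - 1 = 31 copies; with cc0 it would be
   (10 - 3) * 4 - 1 = 27. The loop is then unrolled below only if
   unrolling was requested or the estimate does not exceed the rolled
   insn count. */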
5268 /* Unroll loops from within strength reduction so that we can use the
5269 induction variable information that strength_reduce has already
5270 collected. Always unroll loops that would be as small or smaller
5271 unrolled than when rolled. */
5272 if (unroll_p
5273 || (loop_info->n_iterations > 0
5274 && unrolled_insn_copies <= insn_count))
5275 unroll_loop (loop_end, insn_count, loop_start, end_insert_before,
5276 loop_info, 1);
5278 #ifdef HAVE_decrement_and_branch_on_count
5279 /* Instrument the loop with a BCT insn. */
5280 if (HAVE_decrement_and_branch_on_count && bct_p
5281 && flag_branch_on_count_reg)
5282 insert_bct (loop_start, loop_end, loop_info);
5283 #endif /* HAVE_decrement_and_branch_on_count */
5285 if (loop_dump_stream)
5286 fprintf (loop_dump_stream, "\n");
5288 egress:
5289 VARRAY_FREE (reg_iv_type);
5290 VARRAY_FREE (reg_iv_info);
5293 /* Return 1 if X is a valid source for an initial value (or as value being
5294 compared against in an initial test).
5296 X must be either a register or constant and must not be clobbered between
5297 the current insn and the start of the loop.
5299 INSN is the insn containing X. */
5301 static int
5302 valid_initial_value_p (x, insn, call_seen, loop_start)
5303 rtx x;
5304 rtx insn;
5305 int call_seen;
5306 rtx loop_start;
5308 if (CONSTANT_P (x))
5309 return 1;
5311 /* Only consider pseudos we know about, initialized in insns whose luids
5312 we know. */
5313 if (GET_CODE (x) != REG
5314 || REGNO (x) >= max_reg_before_loop)
5315 return 0;
5317 /* Don't use a call-clobbered register across a call which clobbers it. On
5318 some machines, don't use any hard registers at all. */
5319 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5320 && (SMALL_REGISTER_CLASSES
5321 || (call_used_regs[REGNO (x)] && call_seen)))
5322 return 0;
5324 /* Don't use registers that have been clobbered before the start of the
5325 loop. */
5326 if (reg_set_between_p (x, insn, loop_start))
5327 return 0;
5329 return 1;
5332 /* Scan X for memory refs and check each memory address
5333 as a possible giv. INSN is the insn whose pattern X comes from.
5334 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5335 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5336 more than once in each loop iteration. */
5338 static void
5339 find_mem_givs (x, insn, not_every_iteration, maybe_multiple, loop_start,
5340 loop_end)
5341 rtx x;
5342 rtx insn;
5343 int not_every_iteration, maybe_multiple;
5344 rtx loop_start, loop_end;
5346 register int i, j;
5347 register enum rtx_code code;
5348 register const char *fmt;
5350 if (x == 0)
5351 return;
5353 code = GET_CODE (x);
5354 switch (code)
5356 case REG:
5357 case CONST_INT:
5358 case CONST:
5359 case CONST_DOUBLE:
5360 case SYMBOL_REF:
5361 case LABEL_REF:
5362 case PC:
5363 case CC0:
5364 case ADDR_VEC:
5365 case ADDR_DIFF_VEC:
5366 case USE:
5367 case CLOBBER:
5368 return;
5370 case MEM:
5372 rtx src_reg;
5373 rtx add_val;
5374 rtx mult_val;
5375 int benefit;
5377 /* This code used to disable creating GIVs with mult_val == 1 and
5378 add_val == 0. However, this leads to lost optimizations when
5379 it comes time to combine a set of related DEST_ADDR GIVs, since
5380 this one would not be seen. */
5382 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
5383 &mult_val, 1, &benefit))
5385 /* Found one; record it. */
5386 struct induction *v
5387 = (struct induction *) oballoc (sizeof (struct induction));
5389 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
5390 add_val, benefit, DEST_ADDR, not_every_iteration,
5391 maybe_multiple, &XEXP (x, 0), loop_start, loop_end);
5393 v->mem_mode = GET_MODE (x);
5396 return;
5398 default:
5399 break;
5402 /* Recursively scan the subexpressions for other mem refs. */
5404 fmt = GET_RTX_FORMAT (code);
5405 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5406 if (fmt[i] == 'e')
5407 find_mem_givs (XEXP (x, i), insn, not_every_iteration, maybe_multiple,
5408 loop_start, loop_end);
5409 else if (fmt[i] == 'E')
5410 for (j = 0; j < XVECLEN (x, i); j++)
5411 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
5412 maybe_multiple, loop_start, loop_end);
5415 /* Fill in the data about one biv update.
5416 V is the `struct induction' in which we record the biv. (It is
5417 allocated by the caller, with alloca.)
5418 INSN is the insn that sets it.
5419 DEST_REG is the biv's reg.
5421 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5422 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5423 being set to INC_VAL.
5425 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5426 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5427 can be executed more than once per iteration. If MAYBE_MULTIPLE
5428 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5429 executed exactly once per iteration. */
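/* Illustrative examples (assumed, not in the original comments): an
   increment `i += 4' arrives here with MULT_VAL == const1_rtx and
   INC_VAL == (const_int 4), while a plain assignment `i = 10' inside
   the loop arrives with MULT_VAL == const0_rtx and
   INC_VAL == (const_int 10). */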
5431 static void
5432 record_biv (v, insn, dest_reg, inc_val, mult_val, location,
5433 not_every_iteration, maybe_multiple)
5434 struct induction *v;
5435 rtx insn;
5436 rtx dest_reg;
5437 rtx inc_val;
5438 rtx mult_val;
5439 rtx *location;
5440 int not_every_iteration;
5441 int maybe_multiple;
5443 struct iv_class *bl;
5445 v->insn = insn;
5446 v->src_reg = dest_reg;
5447 v->dest_reg = dest_reg;
5448 v->mult_val = mult_val;
5449 v->add_val = inc_val;
5450 v->location = location;
5451 v->mode = GET_MODE (dest_reg);
5452 v->always_computable = ! not_every_iteration;
5453 v->always_executed = ! not_every_iteration;
5454 v->maybe_multiple = maybe_multiple;
5456 /* Add this to the reg's iv_class, creating a class
5457 if this is the first incrementation of the reg. */
5459 bl = reg_biv_class[REGNO (dest_reg)];
5460 if (bl == 0)
5462 /* Create and initialize new iv_class. */
5464 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
5466 bl->regno = REGNO (dest_reg);
5467 bl->biv = 0;
5468 bl->giv = 0;
5469 bl->biv_count = 0;
5470 bl->giv_count = 0;
5472 /* Set initial value to the reg itself. */
5473 bl->initial_value = dest_reg;
5474 /* We haven't seen the initializing insn yet. */
5475 bl->init_insn = 0;
5476 bl->init_set = 0;
5477 bl->initial_test = 0;
5478 bl->incremented = 0;
5479 bl->eliminable = 0;
5480 bl->nonneg = 0;
5481 bl->reversed = 0;
5482 bl->total_benefit = 0;
5484 /* Add this class to loop_iv_list. */
5485 bl->next = loop_iv_list;
5486 loop_iv_list = bl;
5488 /* Put it in the array of biv register classes. */
5489 reg_biv_class[REGNO (dest_reg)] = bl;
5492 /* Update IV_CLASS entry for this biv. */
5493 v->next_iv = bl->biv;
5494 bl->biv = v;
5495 bl->biv_count++;
5496 if (mult_val == const1_rtx)
5497 bl->incremented = 1;
5499 if (loop_dump_stream)
5501 fprintf (loop_dump_stream,
5502 "Insn %d: possible biv, reg %d,",
5503 INSN_UID (insn), REGNO (dest_reg));
5504 if (GET_CODE (inc_val) == CONST_INT)
5506 fprintf (loop_dump_stream, " const = ");
5507 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
5508 fputc ('\n', loop_dump_stream);
5510 else
5512 fprintf (loop_dump_stream, " const = ");
5513 print_rtl (loop_dump_stream, inc_val);
5514 fprintf (loop_dump_stream, "\n");
5519 /* Fill in the data about one giv.
5520 V is the `struct induction' in which we record the giv. (It is
5521 allocated by the caller, with alloca.)
5522 INSN is the insn that sets it.
5523 BENEFIT estimates the savings from deleting this insn.
5524 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5525 into a register or is used as a memory address.
5527 SRC_REG is the biv reg which the giv is computed from.
5528 DEST_REG is the giv's reg (if the giv is stored in a reg).
5529 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5530 LOCATION points to the place where this giv's value appears in INSN. */
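/* Illustrative example (assumed): in a loop body executing
   `*(base + i * 4) = 0' where `i' is a biv, the address is recorded
   as a DEST_ADDR giv with SRC_REG == i, MULT_VAL == (const_int 4)
   and ADD_VAL == base, while a statement `j = i * 4 + 10' yields a
   DEST_REG giv computed into `j'. */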
5532 static void
5533 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
5534 type, not_every_iteration, maybe_multiple, location, loop_start,
5535 loop_end)
5536 struct induction *v;
5537 rtx insn;
5538 rtx src_reg;
5539 rtx dest_reg;
5540 rtx mult_val, add_val;
5541 int benefit;
5542 enum g_types type;
5543 int not_every_iteration, maybe_multiple;
5544 rtx *location;
5545 rtx loop_start, loop_end;
5547 struct induction *b;
5548 struct iv_class *bl;
5549 rtx set = single_set (insn);
5551 v->insn = insn;
5552 v->src_reg = src_reg;
5553 v->giv_type = type;
5554 v->dest_reg = dest_reg;
5555 v->mult_val = mult_val;
5556 v->add_val = add_val;
5557 v->benefit = benefit;
5558 v->location = location;
5559 v->cant_derive = 0;
5560 v->combined_with = 0;
5561 v->maybe_multiple = maybe_multiple;
5562 v->maybe_dead = 0;
5563 v->derive_adjustment = 0;
5564 v->same = 0;
5565 v->ignore = 0;
5566 v->new_reg = 0;
5567 v->final_value = 0;
5568 v->same_insn = 0;
5569 v->auto_inc_opt = 0;
5570 v->unrolled = 0;
5571 v->shared = 0;
5572 v->derived_from = 0;
5573 v->last_use = 0;
5575 /* The v->always_computable field is used in update_giv_derive, to
5576 determine whether a giv can be used to derive another giv. For a
5577 DEST_REG giv, INSN computes a new value for the giv, so its value
5578 isn't computable if INSN isn't executed every iteration.
5579 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5580 it does not compute a new value. Hence the value is always computable
5581 regardless of whether INSN is executed each iteration. */
5583 if (type == DEST_ADDR)
5584 v->always_computable = 1;
5585 else
5586 v->always_computable = ! not_every_iteration;
5588 v->always_executed = ! not_every_iteration;
5590 if (type == DEST_ADDR)
5592 v->mode = GET_MODE (*location);
5593 v->lifetime = 1;
5595 else /* type == DEST_REG */
5597 v->mode = GET_MODE (SET_DEST (set));
5599 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
5600 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
5602 /* If the lifetime is zero, it means that this register is
5603 really a dead store. So mark this as a giv that can be
5604 ignored. This will not prevent the biv from being eliminated. */
5605 if (v->lifetime == 0)
5606 v->ignore = 1;
5608 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
5609 REG_IV_INFO (REGNO (dest_reg)) = v;
5612 /* Add the giv to the class of givs computed from one biv. */
5614 bl = reg_biv_class[REGNO (src_reg)];
5615 if (bl)
5617 v->next_iv = bl->giv;
5618 bl->giv = v;
5619 /* Don't count DEST_ADDR. This is supposed to count the number of
5620 insns that calculate givs. */
5621 if (type == DEST_REG)
5622 bl->giv_count++;
5623 bl->total_benefit += benefit;
5625 else
5626 /* Fatal error, biv missing for this giv? */
5627 abort ();
5629 if (type == DEST_ADDR)
5630 v->replaceable = 1;
5631 else
5633 /* The giv can be replaced outright by the reduced register only if all
5634 of the following conditions are true:
5635 - the insn that sets the giv is always executed on any iteration
5636 on which the giv is used at all
5637 (there are two ways to deduce this:
5638 either the insn is executed on every iteration,
5639 or all uses follow that insn in the same basic block),
5640 - the giv is not used outside the loop
5641 - no assignments to the biv occur during the giv's lifetime. */
5643 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5644 /* Previous line always fails if INSN was moved by loop opt. */
5645 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
5646 && (! not_every_iteration
5647 || last_use_this_basic_block (dest_reg, insn)))
5649 /* Now check that there are no assignments to the biv within the
5650 giv's lifetime. This requires two separate checks. */
5652 /* Check each biv update, and fail if any are between the first
5653 and last use of the giv.
5655 If this loop contains an inner loop that was unrolled, then
5656 the insn modifying the biv may have been emitted by the loop
5657 unrolling code, and hence does not have a valid luid. Just
5658 mark the giv as not replaceable in this case. The biv itself is not
5659 very useful, because it is used in two different loops.
5660 It is very unlikely that we would be able to optimize the giv
5661 using this biv anyway. */
5663 v->replaceable = 1;
5664 for (b = bl->biv; b; b = b->next_iv)
5666 if (INSN_UID (b->insn) >= max_uid_for_loop
5667 || ((uid_luid[INSN_UID (b->insn)]
5668 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
5669 && (uid_luid[INSN_UID (b->insn)]
5670 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
5672 v->replaceable = 0;
5673 v->not_replaceable = 1;
5674 break;
5678 /* If there are any backwards branches that go from after the
5679 biv update to before it, then this giv is not replaceable. */
5680 if (v->replaceable)
5681 for (b = bl->biv; b; b = b->next_iv)
5682 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
5684 v->replaceable = 0;
5685 v->not_replaceable = 1;
5686 break;
5689 else
5691 /* May still be replaceable; we don't have enough info here to
5692 decide. */
5693 v->replaceable = 0;
5694 v->not_replaceable = 0;
5698 /* Record whether the add_val contains a const_int, for later use by
5699 combine_givs. */
5701 rtx tem = add_val;
5703 v->no_const_addval = 1;
5704 if (tem == const0_rtx)
5706 else if (GET_CODE (tem) == CONST_INT)
5707 v->no_const_addval = 0;
5708 else if (GET_CODE (tem) == PLUS)
5710 while (1)
5712 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5713 tem = XEXP (tem, 0);
5714 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5715 tem = XEXP (tem, 1);
5716 else
5717 break;
5719 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
5720 v->no_const_addval = 0;
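/* For example (illustrative): an ADD_VAL of
   (plus (reg a) (const_int 16)) ends the walk above with
   XEXP (tem, 1) == (const_int 16), so no_const_addval is cleared;
   an ADD_VAL of just (reg a) leaves it set. */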
5724 if (loop_dump_stream)
5726 if (type == DEST_REG)
5727 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5728 INSN_UID (insn), REGNO (dest_reg));
5729 else
5730 fprintf (loop_dump_stream, "Insn %d: dest address",
5731 INSN_UID (insn));
5733 fprintf (loop_dump_stream, " src reg %d benefit %d",
5734 REGNO (src_reg), v->benefit);
5735 fprintf (loop_dump_stream, " lifetime %d",
5736 v->lifetime);
5738 if (v->replaceable)
5739 fprintf (loop_dump_stream, " replaceable");
5741 if (v->no_const_addval)
5742 fprintf (loop_dump_stream, " ncav");
5744 if (GET_CODE (mult_val) == CONST_INT)
5746 fprintf (loop_dump_stream, " mult ");
5747 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5749 else
5751 fprintf (loop_dump_stream, " mult ");
5752 print_rtl (loop_dump_stream, mult_val);
5755 if (GET_CODE (add_val) == CONST_INT)
5757 fprintf (loop_dump_stream, " add ");
5758 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5760 else
5762 fprintf (loop_dump_stream, " add ");
5763 print_rtl (loop_dump_stream, add_val);
5767 if (loop_dump_stream)
5768 fprintf (loop_dump_stream, "\n");
5773 /* All this does is determine whether a giv can be made replaceable because
5774 its final value can be calculated. This code can not be part of record_giv
5775 above, because final_giv_value requires that the number of loop iterations
5776 be known, and that can not be accurately calculated until after all givs
5777 have been identified. */
5779 static void
5780 check_final_value (v, loop_start, loop_end, n_iterations)
5781 struct induction *v;
5782 rtx loop_start, loop_end;
5783 unsigned HOST_WIDE_INT n_iterations;
5785 struct iv_class *bl;
5786 rtx final_value = 0;
5788 bl = reg_biv_class[REGNO (v->src_reg)];
5790 /* DEST_ADDR givs will never reach here, because they are always marked
5791 replaceable above in record_giv. */
5793 /* The giv can be replaced outright by the reduced register only if all
5794 of the following conditions are true:
5795 - the insn that sets the giv is always executed on any iteration
5796 on which the giv is used at all
5797 (there are two ways to deduce this:
5798 either the insn is executed on every iteration,
5799 or all uses follow that insn in the same basic block),
5800 - its final value can be calculated (this condition is different
5801 than the one above in record_giv)
5802 - no assignments to the biv occur during the giv's lifetime. */
5804 #if 0
5805 /* This is only called now when replaceable is known to be false. */
5806 /* Clear replaceable, so that it won't confuse final_giv_value. */
5807 v->replaceable = 0;
5808 #endif
5810 if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations))
5811 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5813 int biv_increment_seen = 0;
5814 rtx p = v->insn;
5815 rtx last_giv_use;
5817 v->replaceable = 1;
5819 /* When trying to determine whether or not a biv increment occurs
5820 during the lifetime of the giv, we can ignore uses of the variable
5821 outside the loop because final_value is true. Hence we can not
5822 use regno_last_uid and regno_first_uid as above in record_giv. */
5824 /* Search the loop to determine whether any assignments to the
5825 biv occur during the giv's lifetime. Start with the insn
5826 that sets the giv, and search around the loop until we come
5827 back to that insn again.
5829 Also fail if there is a jump within the giv's lifetime that jumps
5830 to somewhere outside the lifetime but still within the loop. This
5831 catches spaghetti code where the execution order is not linear, and
5832 hence the above test fails. Here we assume that the giv lifetime
5833 does not extend from one iteration of the loop to the next, so as
5834 to make the test easier. Since the lifetime isn't known yet,
5835 this requires two loops. See also record_giv above. */
5837 last_giv_use = v->insn;
5839 while (1)
5841 p = NEXT_INSN (p);
5842 if (p == loop_end)
5843 p = NEXT_INSN (loop_start);
5844 if (p == v->insn)
5845 break;
5847 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5848 || GET_CODE (p) == CALL_INSN)
5850 if (biv_increment_seen)
5852 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5854 v->replaceable = 0;
5855 v->not_replaceable = 1;
5856 break;
5859 else if (reg_set_p (v->src_reg, PATTERN (p)))
5860 biv_increment_seen = 1;
5861 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5862 last_giv_use = p;
5866 /* Now that the lifetime of the giv is known, check for branches
5867 from within the lifetime to outside the lifetime if it is still
5868 replaceable. */
5870 if (v->replaceable)
5872 p = v->insn;
5873 while (1)
5875 p = NEXT_INSN (p);
5876 if (p == loop_end)
5877 p = NEXT_INSN (loop_start);
5878 if (p == last_giv_use)
5879 break;
5881 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5882 && LABEL_NAME (JUMP_LABEL (p))
5883 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5884 && loop_insn_first_p (loop_start, JUMP_LABEL (p)))
5885 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5886 && loop_insn_first_p (JUMP_LABEL (p), loop_end))))
5888 v->replaceable = 0;
5889 v->not_replaceable = 1;
5891 if (loop_dump_stream)
5892 fprintf (loop_dump_stream,
5893 "Found branch outside giv lifetime.\n");
5895 break;
5900 /* If it is replaceable, then save the final value. */
5901 if (v->replaceable)
5902 v->final_value = final_value;
5905 if (loop_dump_stream && v->replaceable)
5906 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5907 INSN_UID (v->insn), REGNO (v->dest_reg));
5910 /* Update the status of whether a giv can derive other givs.
5912 We need to do something special if there is or may be an update to the biv
5913 between the time the giv is defined and the time it is used to derive
5914 another giv.
5916 In addition, a giv that is only conditionally set is not allowed to
5917 derive another giv once a label has been passed.
5919 The cases we look at are when a label or an update to a biv is passed. */
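/* An illustrative sketch (assumed): with a biv `i' and a giv defined
   earlier in the loop body as `g = 4 * i', a biv update `i += 2'
   between g's definition and a later use of g to derive another giv
   leaves the derived value short by giv->mult_val * biv->add_val
   == 4 * 2; that product is recorded as the derive_adjustment. When
   the update is conditional, cant_derive is set instead. */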
5921 static void
5922 update_giv_derive (p)
5923 rtx p;
5925 struct iv_class *bl;
5926 struct induction *biv, *giv;
5927 rtx tem;
5928 int dummy;
5930 /* Search all IV classes, then all bivs, and finally all givs.
5932 There are three cases we are concerned with. First we have the situation
5933 of a giv that is only updated conditionally. In that case, it may not
5934 derive any givs after a label is passed.
5936 The second case is when a biv update occurs, or may occur, after the
5937 definition of a giv. For certain biv updates (see below) that are
5938 known to occur between the giv definition and use, we can adjust the
5939 giv definition. For others, or when the biv update is conditional,
5940 we must prevent the giv from deriving any other givs. There are two
5941 sub-cases within this case.
5943 If this is a label, we are concerned with any biv update that is done
5944 conditionally, since it may be done after the giv is defined followed by
5945 a branch here (actually, we need to pass both a jump and a label, but
5946 this extra tracking doesn't seem worth it).
5948 If this is a jump, we are concerned about any biv update that may be
5949 executed multiple times. We are actually only concerned about
5950 backward jumps, but it is probably not worth performing the test
5951 on the jump again here.
5953 If this is a biv update, we must adjust the giv status to show that a
5954 subsequent biv update was performed. If this adjustment cannot be done,
5955 the giv cannot derive further givs. */
5957 for (bl = loop_iv_list; bl; bl = bl->next)
5958 for (biv = bl->biv; biv; biv = biv->next_iv)
5959 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5960 || biv->insn == p)
5962 for (giv = bl->giv; giv; giv = giv->next_iv)
5964 /* If cant_derive is already true, there is no point in
5965 checking all of these conditions again. */
5966 if (giv->cant_derive)
5967 continue;
5969 /* If this giv is conditionally set and we have passed a label,
5970 it cannot derive anything. */
5971 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5972 giv->cant_derive = 1;
5974 /* Skip givs that have mult_val == 0, since
5975 they are really invariants. Also skip those that are
5976 replaceable, since we know their lifetime doesn't contain
5977 any biv update. */
5978 else if (giv->mult_val == const0_rtx || giv->replaceable)
5979 continue;
5981 /* The only way we can allow this giv to derive another
5982 is if this is a biv increment and we can form the product
5983 of biv->add_val and giv->mult_val. In this case, we will
5984 be able to compute a compensation. */
5985 else if (biv->insn == p)
5987 tem = 0;
5989 if (biv->mult_val == const1_rtx)
5990 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5991 biv->add_val,
5992 giv->mult_val),
5993 &dummy);
5995 if (tem && giv->derive_adjustment)
5996 tem = simplify_giv_expr
5997 (gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5998 &dummy);
6000 if (tem)
6001 giv->derive_adjustment = tem;
6002 else
6003 giv->cant_derive = 1;
6005 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6006 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6007 giv->cant_derive = 1;
6012 /* Check whether an insn is an increment legitimate for a basic induction var.
6013 X is the source of insn P, or a part of it.
6014 MODE is the mode in which X should be interpreted.
6016 DEST_REG is the putative biv, also the destination of the insn.
6017 We accept patterns of these forms:
6018 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6019 REG = INVARIANT + REG
6021 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6022 store the additive term into *INC_VAL, and store the place where
6023 we found the additive term into *LOCATION.
6025 If X is an assignment of an invariant into DEST_REG, we set
6026 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6028 We also want to detect a BIV when it corresponds to a variable
6029 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6030 of the variable may be a PLUS that adds a SUBREG of that variable to
6031 an invariant and then sign- or zero-extends the result of the PLUS
6032 into the variable.
6034 Most GIVs in such cases will be in the promoted mode, since that is
6035 probably the natural computation mode (and almost certainly the mode
6036 used for addresses) on the machine. So we view the pseudo-reg containing
6037 the variable as the BIV, as if it were simply incremented.
6039 Note that treating the entire pseudo as a BIV will result in making
6040 simple increments to any GIVs based on it. However, if the variable
6041 overflows in its declared mode but not its promoted mode, the result will
6042 be incorrect. This is acceptable if the variable is signed, since
6043 overflows in such cases are undefined, but not if it is unsigned, since
6044 those overflows are defined. So we only check for SIGN_EXTEND and
6045 not ZERO_EXTEND.
6047 If we cannot find a biv, we return 0. */
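/* Illustrative examples of the accepted forms (assumed):

     (set (reg i) (plus (reg i) (const_int 1)))   i = i + 1
     (set (reg i) (plus (const_int -8) (reg i)))  i = i - 8
     (set (reg i) (const_int 0))                  i = 0, innermost
                                                  loop only

   The first two return 1 with *MULT_VAL == const1_rtx and *INC_VAL
   set to the addend; the last returns 1 with *MULT_VAL == const0_rtx
   and *INC_VAL == (const_int 0). */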
6049 static int
6050 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val, location)
6051 register rtx x;
6052 enum machine_mode mode;
6053 rtx p;
6054 rtx dest_reg;
6055 rtx *inc_val;
6056 rtx *mult_val;
6057 rtx **location;
6059 register enum rtx_code code;
6060 rtx *argp, arg;
6061 rtx insn, set = 0;
6063 code = GET_CODE (x);
6064 *location = NULL;
6065 switch (code)
6067 case PLUS:
6068 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6069 || (GET_CODE (XEXP (x, 0)) == SUBREG
6070 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6071 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6073 argp = &XEXP (x, 1);
6075 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6076 || (GET_CODE (XEXP (x, 1)) == SUBREG
6077 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6078 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6080 argp = &XEXP (x, 0);
6082 else
6083 return 0;
6085 arg = *argp;
6086 if (invariant_p (arg) != 1)
6087 return 0;
6089 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6090 *mult_val = const1_rtx;
6091 *location = argp;
6092 return 1;
6094 case SUBREG:
6095 /* If this is a SUBREG for a promoted variable, check the inner
6096 value. */
6097 if (SUBREG_PROMOTED_VAR_P (x))
6098 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
6099 dest_reg, p, inc_val, mult_val, location);
6100 return 0;
6102 case REG:
6103 /* If this register is assigned in a previous insn, look at its
6104 source, but don't go outside the loop or past a label. */
6106 insn = p;
6107 while (1)
6109 do {
6110 insn = PREV_INSN (insn);
6111 } while (insn && GET_CODE (insn) == NOTE
6112 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6114 if (!insn)
6115 break;
6116 set = single_set (insn);
6117 if (set == 0)
6118 break;
6120 if ((SET_DEST (set) == x
6121 || (GET_CODE (SET_DEST (set)) == SUBREG
6122 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
6123 <= UNITS_PER_WORD)
6124 && (GET_MODE_CLASS (GET_MODE (SET_DEST (set)))
6125 == MODE_INT)
6126 && SUBREG_REG (SET_DEST (set)) == x))
6127 && basic_induction_var (SET_SRC (set),
6128 (GET_MODE (SET_SRC (set)) == VOIDmode
6129 ? GET_MODE (x)
6130 : GET_MODE (SET_SRC (set))),
6131 dest_reg, insn,
6132 inc_val, mult_val, location))
6133 return 1;
6135 /* ... fall through ... */
6137 /* Can accept constant setting of biv only when inside inner most loop.
6138 Otherwise, a biv of an inner loop may be incorrectly recognized
6139 as a biv of the outer loop,
6140 causing code to be moved INTO the inner loop. */
6141 case MEM:
6142 if (invariant_p (x) != 1)
6143 return 0;
6144 case CONST_INT:
6145 case SYMBOL_REF:
6146 case CONST:
6147 /* convert_modes aborts if we try to convert to or from CCmode, so just
6148 exclude that case. It is very unlikely that a condition code value
6149 would be a useful iterator anyway. */
6150 if (this_loop_info.loops_enclosed == 1
6151 && GET_MODE_CLASS (mode) != MODE_CC
6152 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6154 /* Possible bug here? Perhaps we don't know the mode of X. */
6155 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6156 *mult_val = const0_rtx;
6157 return 1;
6159 else
6160 return 0;
6162 case SIGN_EXTEND:
6163 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6164 dest_reg, p, inc_val, mult_val, location);
6166 case ASHIFTRT:
6167 /* Similar, since this can be a sign extension. */
6168 for (insn = PREV_INSN (p);
6169 (insn && GET_CODE (insn) == NOTE
6170 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6171 insn = PREV_INSN (insn))
6174 if (insn)
6175 set = single_set (insn);
6177 if (set && SET_DEST (set) == XEXP (x, 0)
6178 && GET_CODE (XEXP (x, 1)) == CONST_INT
6179 && INTVAL (XEXP (x, 1)) >= 0
6180 && GET_CODE (SET_SRC (set)) == ASHIFT
6181 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6182 return basic_induction_var (XEXP (SET_SRC (set), 0),
6183 GET_MODE (XEXP (x, 0)),
6184 dest_reg, insn, inc_val, mult_val,
6185 location);
6186 return 0;
6188 default:
6189 return 0;
6193 /* A general induction variable (giv) is any quantity that is a linear
6194 function of a basic induction variable,
6195 i.e. giv = biv * mult_val + add_val.
6196 The coefficients can be any loop invariant quantity.
6197 A giv need not be computed directly from the biv;
6198 it can be computed by way of other givs. */
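/* For example (illustrative): in

     for (i = 0; i < n; i++)
       j = i * 4 + 10;

   `i' is a basic induction variable and `j' is a general induction
   variable with mult_val == 4 and add_val == 10. */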
6200 /* Determine whether X computes a giv.
6201 If it does, return a nonzero value
6202 which is the benefit from eliminating the computation of X;
6203 set *SRC_REG to the register of the biv that it is computed from;
6204 set *ADD_VAL and *MULT_VAL to the coefficients,
6205 such that the value of X is biv * mult + add; */
6207 static int
6208 general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
6209 rtx x;
6210 rtx *src_reg;
6211 rtx *add_val;
6212 rtx *mult_val;
6213 int is_addr;
6214 int *pbenefit;
6216 rtx orig_x = x;
6217 char *storage;
6219 /* If this is an invariant, forget it, it isn't a giv. */
6220 if (invariant_p (x) == 1)
6221 return 0;
6223 /* See if the expression could be a giv and get its form.
6224 Mark our place on the obstack in case we don't find a giv. */
6225 storage = (char *) oballoc (0);
6226 *pbenefit = 0;
6227 x = simplify_giv_expr (x, pbenefit);
6228 if (x == 0)
6230 obfree (storage);
6231 return 0;
6234 switch (GET_CODE (x))
6236 case USE:
6237 case CONST_INT:
6238 /* Since this is now an invariant and wasn't before, it must be a giv
6239 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6240 with. */
6241 *src_reg = loop_iv_list->biv->dest_reg;
6242 *mult_val = const0_rtx;
6243 *add_val = x;
6244 break;
6246 case REG:
6247 /* This is equivalent to a BIV. */
6248 *src_reg = x;
6249 *mult_val = const1_rtx;
6250 *add_val = const0_rtx;
6251 break;
6253 case PLUS:
6254 /* Either (plus (biv) (invar)) or
6255 (plus (mult (biv) (invar_1)) (invar_2)). */
6256 if (GET_CODE (XEXP (x, 0)) == MULT)
6258 *src_reg = XEXP (XEXP (x, 0), 0);
6259 *mult_val = XEXP (XEXP (x, 0), 1);
6261 else
6263 *src_reg = XEXP (x, 0);
6264 *mult_val = const1_rtx;
6266 *add_val = XEXP (x, 1);
6267 break;
6269 case MULT:
6270 /* ADD_VAL is zero. */
6271 *src_reg = XEXP (x, 0);
6272 *mult_val = XEXP (x, 1);
6273 *add_val = const0_rtx;
6274 break;
6276 default:
6277 abort ();
6280 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6281 one unless they are CONST_INT). */
6282 if (GET_CODE (*add_val) == USE)
6283 *add_val = XEXP (*add_val, 0);
6284 if (GET_CODE (*mult_val) == USE)
6285 *mult_val = XEXP (*mult_val, 0);
6287 if (is_addr)
6289 #ifdef ADDRESS_COST
6290 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
6291 #else
6292 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
6293 #endif
6295 else
6296 *pbenefit += rtx_cost (orig_x, SET);
6298 /* Always return true if this is a giv so it will be detected as such,
6299 even if the benefit is zero or negative. This allows elimination
6300 of bivs that might otherwise not be eliminated. */
6301 return 1;
6304 /* Given an expression, X, try to form it as a linear function of a biv.
6305 We will canonicalize it to be of the form
6306 (plus (mult (BIV) (invar_1))
6307 (invar_2))
6308 with possible degeneracies.
6310 The invariant expressions must each be of a form that can be used as a
6311 machine operand. We surround them with a USE rtx (a hack, but localized
6312 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6313 routine; it is the caller's responsibility to strip them.
6315 If no such canonicalization is possible (i.e., two biv's are used or an
6316 expression that is neither invariant nor a biv or giv), this routine
6317 returns 0.
6319 For a non-zero return, the result will have a code of CONST_INT, USE,
6320 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6322 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
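/* A worked example (illustrative): for a biv `i', the expression
   3 * i + (i + 5) is canonicalized by the recursion below into

     (plus (mult (reg i) (const_int 4)) (const_int 5))

   i.e. invar_1 == 4 and invar_2 == 5 in the form above. */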
6324 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6325 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6326 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
6327 static int cmp_recombine_givs_stats PARAMS ((const PTR, const PTR));
6329 static rtx
6330 simplify_giv_expr (x, benefit)
6331 rtx x;
6332 int *benefit;
6334 enum machine_mode mode = GET_MODE (x);
6335 rtx arg0, arg1;
6336 rtx tem;
6338 /* If this is not an integer mode, or if we cannot do arithmetic in this
6339 mode, this can't be a giv. */
6340 if (mode != VOIDmode
6341 && (GET_MODE_CLASS (mode) != MODE_INT
6342 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6343 return NULL_RTX;
6345 switch (GET_CODE (x))
6347 case PLUS:
6348 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
6349 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
6350 if (arg0 == 0 || arg1 == 0)
6351 return NULL_RTX;
6353 /* Put constant last, CONST_INT last if both constant. */
6354 if ((GET_CODE (arg0) == USE
6355 || GET_CODE (arg0) == CONST_INT)
6356 && ! ((GET_CODE (arg0) == USE
6357 && GET_CODE (arg1) == USE)
6358 || GET_CODE (arg1) == CONST_INT))
6359 tem = arg0, arg0 = arg1, arg1 = tem;
6361 /* Handle addition of zero, then addition of an invariant. */
6362 if (arg1 == const0_rtx)
6363 return arg0;
6364 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6365 switch (GET_CODE (arg0))
6367 case CONST_INT:
6368 case USE:
6369 /* Adding two invariants must result in an invariant, so enclose
6370 addition operation inside a USE and return it. */
6371 if (GET_CODE (arg0) == USE)
6372 arg0 = XEXP (arg0, 0);
6373 if (GET_CODE (arg1) == USE)
6374 arg1 = XEXP (arg1, 0);
6376 if (GET_CODE (arg0) == CONST_INT)
6377 tem = arg0, arg0 = arg1, arg1 = tem;
6378 if (GET_CODE (arg1) == CONST_INT)
6379 tem = sge_plus_constant (arg0, arg1);
6380 else
6381 tem = sge_plus (mode, arg0, arg1);
6383 if (GET_CODE (tem) != CONST_INT)
6384 tem = gen_rtx_USE (mode, tem);
6385 return tem;
6387 case REG:
6388 case MULT:
6389 /* biv + invar or mult + invar. Return sum. */
6390 return gen_rtx_PLUS (mode, arg0, arg1);
6392 case PLUS:
6393 /* (a + invar_1) + invar_2. Associate. */
6394 return
6395 simplify_giv_expr (gen_rtx_PLUS (mode,
6396 XEXP (arg0, 0),
6397 gen_rtx_PLUS (mode,
6398 XEXP (arg0, 1),
6399 arg1)),
6400 benefit);
6402 default:
6403 abort ();
6406 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6407 MULT to reduce cases. */
6408 if (GET_CODE (arg0) == REG)
6409 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6410 if (GET_CODE (arg1) == REG)
6411 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6413 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6414 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6415 Recurse to associate the second PLUS. */
6416 if (GET_CODE (arg1) == MULT)
6417 tem = arg0, arg0 = arg1, arg1 = tem;
6419 if (GET_CODE (arg1) == PLUS)
6420 return
6421 simplify_giv_expr (gen_rtx_PLUS (mode,
6422 gen_rtx_PLUS (mode, arg0,
6423 XEXP (arg1, 0)),
6424 XEXP (arg1, 1)),
6425 benefit);
6427 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6428 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6429 return NULL_RTX;
6431 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6432 return NULL_RTX;
6434 return simplify_giv_expr (gen_rtx_MULT (mode,
6435 XEXP (arg0, 0),
6436 gen_rtx_PLUS (mode,
6437 XEXP (arg0, 1),
6438 XEXP (arg1, 1))),
6439 benefit);
6441 case MINUS:
6442 /* Handle "a - b" as "a + b * (-1)". */
6443 return simplify_giv_expr (gen_rtx_PLUS (mode,
6444 XEXP (x, 0),
6445 gen_rtx_MULT (mode,
6446 XEXP (x, 1),
6447 constm1_rtx)),
6448 benefit);
6450 case MULT:
6451 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
6452 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
6453 if (arg0 == 0 || arg1 == 0)
6454 return NULL_RTX;
6456 /* Put constant last, CONST_INT last if both constant. */
6457 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6458 && GET_CODE (arg1) != CONST_INT)
6459 tem = arg0, arg0 = arg1, arg1 = tem;
6461 /* If second argument is not now constant, not giv. */
6462 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6463 return NULL_RTX;
6465 /* Handle multiply by 0 or 1. */
6466 if (arg1 == const0_rtx)
6467 return const0_rtx;
6469 else if (arg1 == const1_rtx)
6470 return arg0;
6472 switch (GET_CODE (arg0))
6474 case REG:
6475 /* biv * invar. Done. */
6476 return gen_rtx_MULT (mode, arg0, arg1);
6478 case CONST_INT:
6479 /* Product of two constants. */
6480 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6482 case USE:
6483 /* invar * invar. It is a giv, but very few of these will
6484 actually pay off, so limit to simple registers. */
6485 if (GET_CODE (arg1) != CONST_INT)
6486 return NULL_RTX;
6488 arg0 = XEXP (arg0, 0);
6489 if (GET_CODE (arg0) == REG)
6490 tem = gen_rtx_MULT (mode, arg0, arg1);
6491 else if (GET_CODE (arg0) == MULT
6492 && GET_CODE (XEXP (arg0, 0)) == REG
6493 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
6495 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
6496 GEN_INT (INTVAL (XEXP (arg0, 1))
6497 * INTVAL (arg1)));
6499 else
6500 return NULL_RTX;
6501 return gen_rtx_USE (mode, tem);
6503 case MULT:
6504 /* (a * invar_1) * invar_2. Associate. */
6505 return simplify_giv_expr (gen_rtx_MULT (mode,
6506 XEXP (arg0, 0),
6507 gen_rtx_MULT (mode,
6508 XEXP (arg0, 1),
6509 arg1)),
6510 benefit);
6512 case PLUS:
6513 /* (a + invar_1) * invar_2. Distribute. */
6514 return simplify_giv_expr (gen_rtx_PLUS (mode,
6515 gen_rtx_MULT (mode,
6516 XEXP (arg0, 0),
6517 arg1),
6518 gen_rtx_MULT (mode,
6519 XEXP (arg0, 1),
6520 arg1)),
6521 benefit);
6523 default:
6524 abort ();
6527 case ASHIFT:
6528 /* Shift by constant is multiply by power of two. */
6529 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6530 return 0;
6532 return
6533 simplify_giv_expr (gen_rtx_MULT (mode,
6534 XEXP (x, 0),
6535 GEN_INT ((HOST_WIDE_INT) 1
6536 << INTVAL (XEXP (x, 1)))),
6537 benefit);
6539 case NEG:
6540 /* "-a" is "a * (-1)" */
6541 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6542 benefit);
6544 case NOT:
6545 /* "~a" is "-a - 1". Silly, but easy. */
6546 return simplify_giv_expr (gen_rtx_MINUS (mode,
6547 gen_rtx_NEG (mode, XEXP (x, 0)),
6548 const1_rtx),
6549 benefit);
6551 case USE:
6552 /* Already in proper form for invariant. */
6553 return x;
6555 case REG:
6556 /* If this is a new register, we can't deal with it. */
6557 if (REGNO (x) >= max_reg_before_loop)
6558 return 0;
6560 /* Check for biv or giv. */
6561 switch (REG_IV_TYPE (REGNO (x)))
6563 case BASIC_INDUCT:
6564 return x;
6565 case GENERAL_INDUCT:
6567 struct induction *v = REG_IV_INFO (REGNO (x));
6569 /* Form expression from giv and add benefit. Ensure this giv
6570 can derive another and subtract any needed adjustment if so. */
6571 *benefit += v->benefit;
6572 if (v->cant_derive)
6573 return 0;
6575 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6576 v->src_reg, v->mult_val),
6577 v->add_val);
6579 if (v->derive_adjustment)
6580 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6581 return simplify_giv_expr (tem, benefit);
6584 default:
6585 /* If it isn't an induction variable, and it is invariant, we
6586 may be able to simplify things further by looking through
6587 the bits we just moved outside the loop. */
6588 if (invariant_p (x) == 1)
6590 struct movable *m;
6592 for (m = the_movables; m ; m = m->next)
6593 if (rtx_equal_p (x, m->set_dest))
6595 /* Ok, we found a match. Substitute and simplify. */
6597 /* If we match another movable, we must use that, as
6598 this one is going away. */
6599 if (m->match)
6600 return simplify_giv_expr (m->match->set_dest, benefit);
6602 /* If consec is non-zero, this is a member of a group of
6603 instructions that were moved together. We handle this
6604 case only to the point of seeking to the last insn and
6605 looking for a REG_EQUAL. Fail if we don't find one. */
6606 if (m->consec != 0)
6608 int i = m->consec;
6609 tem = m->insn;
6610 do { tem = NEXT_INSN (tem); } while (--i > 0);
6612 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6613 if (tem)
6614 tem = XEXP (tem, 0);
6616 else
6618 tem = single_set (m->insn);
6619 if (tem)
6620 tem = SET_SRC (tem);
6623 if (tem)
6625 /* What we are most interested in is pointer
6626 arithmetic on invariants -- only take
6627 patterns we may be able to do something with. */
6628 if (GET_CODE (tem) == PLUS
6629 || GET_CODE (tem) == MULT
6630 || GET_CODE (tem) == ASHIFT
6631 || GET_CODE (tem) == CONST_INT
6632 || GET_CODE (tem) == SYMBOL_REF)
6634 tem = simplify_giv_expr (tem, benefit);
6635 if (tem)
6636 return tem;
6638 else if (GET_CODE (tem) == CONST
6639 && GET_CODE (XEXP (tem, 0)) == PLUS
6640 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6641 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6643 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
6644 if (tem)
6645 return tem;
6648 break;
6651 break;
6654 /* Fall through to general case. */
6655 default:
6656 /* If invariant, return as USE (unless CONST_INT).
6657 Otherwise, not giv. */
6658 if (GET_CODE (x) == USE)
6659 x = XEXP (x, 0);
6661 if (invariant_p (x) == 1)
6663 if (GET_CODE (x) == CONST_INT)
6664 return x;
6665 if (GET_CODE (x) == CONST
6666 && GET_CODE (XEXP (x, 0)) == PLUS
6667 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6668 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6669 x = XEXP (x, 0);
6670 return gen_rtx_USE (mode, x);
6672 else
6673 return 0;
6677 /* This routine folds invariants such that there is only ever one
6678 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6680 static rtx
6681 sge_plus_constant (x, c)
6682 rtx x, c;
6684 if (GET_CODE (x) == CONST_INT)
6685 return GEN_INT (INTVAL (x) + INTVAL (c));
6686 else if (GET_CODE (x) != PLUS)
6687 return gen_rtx_PLUS (GET_MODE (x), x, c);
6688 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6690 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6691 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6693 else if (GET_CODE (XEXP (x, 0)) == PLUS
6694 || GET_CODE (XEXP (x, 1)) != PLUS)
6696 return gen_rtx_PLUS (GET_MODE (x),
6697 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6699 else
6701 return gen_rtx_PLUS (GET_MODE (x),
6702 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6706 static rtx
6707 sge_plus (mode, x, y)
6708 enum machine_mode mode;
6709 rtx x, y;
6711 while (GET_CODE (y) == PLUS)
6713 rtx a = XEXP (y, 0);
6714 if (GET_CODE (a) == CONST_INT)
6715 x = sge_plus_constant (x, a);
6716 else
6717 x = gen_rtx_PLUS (mode, x, a);
6718 y = XEXP (y, 1);
6720 if (GET_CODE (y) == CONST_INT)
6721 x = sge_plus_constant (x, y);
6722 else
6723 x = gen_rtx_PLUS (mode, x, y);
6724 return x;
6727 /* Help detect a giv that is calculated by several consecutive insns;
6728 for example,
6729 giv = biv * M
6730 giv = giv + A
6731 The caller has already identified the first insn P as having a giv as dest;
6732 we check that all other insns that set the same register follow
6733 immediately after P, that they alter nothing else,
6734 and that the result of the last is still a giv.
6736 The value is 0 if the reg set in P is not really a giv.
6737 Otherwise, the value is the amount gained by eliminating
6738 all the consecutive insns that compute the value.
6740 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6741 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6743 The coefficients of the ultimate giv value are stored in
6744 *MULT_VAL and *ADD_VAL. */
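/* Concretely (illustrative): on a machine without a multiply-add
   insn, `j = i * 5 + 8' may be emitted as

     j = i * 5
     j = j + 8

   and consec_sets_giv checks that the second insn immediately
   follows the first, alters nothing else, and leaves j a giv with
   mult_val == 5 and add_val == 8. */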
6746 static int
6747 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6748 add_val, mult_val, last_consec_insn)
6749 int first_benefit;
6750 rtx p;
6751 rtx src_reg;
6752 rtx dest_reg;
6753 rtx *add_val;
6754 rtx *mult_val;
6755 rtx *last_consec_insn;
6757 int count;
6758 enum rtx_code code;
6759 int benefit;
6760 rtx temp;
6761 rtx set;
6763 /* Indicate that this is a giv so that we can update the value produced in
6764 each insn of the multi-insn sequence.
6766 This induction structure will be used only by the call to
6767 general_induction_var below, so we can allocate it on our stack.
6768 If this is a giv, our caller will replace the induct var entry with
6769 a new induction structure. */
6770 struct induction *v
6771 = (struct induction *) alloca (sizeof (struct induction));
6772 v->src_reg = src_reg;
6773 v->mult_val = *mult_val;
6774 v->add_val = *add_val;
6775 v->benefit = first_benefit;
6776 v->cant_derive = 0;
6777 v->derive_adjustment = 0;
6779 REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT;
6780 REG_IV_INFO (REGNO (dest_reg)) = v;
6782 count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
6784 while (count > 0)
6786 p = NEXT_INSN (p);
6787 code = GET_CODE (p);
6789 /* If libcall, skip to end of call sequence. */
6790 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6791 p = XEXP (temp, 0);
6793 if (code == INSN
6794 && (set = single_set (p))
6795 && GET_CODE (SET_DEST (set)) == REG
6796 && SET_DEST (set) == dest_reg
6797 && (general_induction_var (SET_SRC (set), &src_reg,
6798 add_val, mult_val, 0, &benefit)
6799 /* Giv created by equivalent expression. */
6800 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6801 && general_induction_var (XEXP (temp, 0), &src_reg,
6802 add_val, mult_val, 0, &benefit)))
6803 && src_reg == v->src_reg)
6805 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6806 benefit += libcall_benefit (p);
6808 count--;
6809 v->mult_val = *mult_val;
6810 v->add_val = *add_val;
6811 v->benefit = benefit;
6813 else if (code != NOTE)
6815 /* Allow insns that set something other than this giv to a
6816 constant. Such insns are needed on machines which cannot
6817 include long constants and should not disqualify a giv. */
6818 if (code == INSN
6819 && (set = single_set (p))
6820 && SET_DEST (set) != dest_reg
6821 && CONSTANT_P (SET_SRC (set)))
6822 continue;
6824 REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT;
6825 return 0;
6829 *last_consec_insn = p;
6830 return v->benefit;
6833 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6834 represented by G1. If no such expression can be found, or it is clear that
6835 it cannot possibly be a valid address, 0 is returned.
6837 To perform the computation, we note that
6838 G1 = x * v + a and
6839 G2 = y * v + b
6840 where `v' is the biv.
6842 So G2 = (y/x) * G1 + (b - a*y/x).
6844 Note that MULT = y/x.
6846 Update: A and B are now allowed to be additive expressions such that
6847 B contains all variables in A. That is, computing B-A will not require
6848 subtracting variables. */
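/* A numeric sketch (illustrative): with G1 = 2*v + 4 and
   G2 = 6*v + 20, MULT = 6/2 = 3 and the result expresses G2 as
   3 * G1 + (20 - 4*3), i.e. (plus (mult G1 3) (const_int 8));
   indeed 3 * (2*v + 4) + 8 == 6*v + 20. */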
6850 static rtx
6851 express_from_1 (a, b, mult)
6852 rtx a, b, mult;
6854 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6856 if (mult == const0_rtx)
6857 return b;
6859 /* If MULT is not 1, we cannot handle A with non-constants, since we
6860 would then be required to subtract multiples of the registers in A.
6861 This is theoretically possible, and may even apply to some Fortran
6862 constructs, but it is a lot of work and we do not attempt it here. */
6864 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6865 return NULL_RTX;
6867 /* In general these structures are sorted top to bottom (down the PLUS
6868 chain), but not left to right across the PLUS. If B is a higher
6869 order giv than A, we can strip one level and recurse. If A is higher
6870 order, we'll eventually bail out, but won't know that until the end.
6871 If they are the same, we'll strip one level around this loop. */
6873 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6875 rtx ra, rb, oa, ob, tmp;
6877 ra = XEXP (a, 0), oa = XEXP (a, 1);
6878 if (GET_CODE (ra) == PLUS)
6879 tmp = ra, ra = oa, oa = tmp;
6881 rb = XEXP (b, 0), ob = XEXP (b, 1);
6882 if (GET_CODE (rb) == PLUS)
6883 tmp = rb, rb = ob, ob = tmp;
6885 if (rtx_equal_p (ra, rb))
6886 /* We matched: remove one reg completely. */
6887 a = oa, b = ob;
6888 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6889 /* An alternate match. */
6890 a = oa, b = rb;
6891 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6892 /* An alternate match. */
6893 a = ra, b = ob;
6894 else
6896 /* Indicates an extra register in B. Strip one level from B and
6897 recurse, hoping B was the higher order expression. */
6898 ob = express_from_1 (a, ob, mult);
6899 if (ob == NULL_RTX)
6900 return NULL_RTX;
6901 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6905 /* Here we are at the last level of A, go through the cases hoping to
6906 get rid of everything but a constant. */
6908 if (GET_CODE (a) == PLUS)
6910 rtx ra, oa;
6912 ra = XEXP (a, 0), oa = XEXP (a, 1);
6913 if (rtx_equal_p (oa, b))
6914 oa = ra;
6915 else if (!rtx_equal_p (ra, b))
6916 return NULL_RTX;
6918 if (GET_CODE (oa) != CONST_INT)
6919 return NULL_RTX;
6921 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6923 else if (GET_CODE (a) == CONST_INT)
6925 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6927 else if (GET_CODE (b) == PLUS)
6929 if (rtx_equal_p (a, XEXP (b, 0)))
6930 return XEXP (b, 1);
6931 else if (rtx_equal_p (a, XEXP (b, 1)))
6932 return XEXP (b, 0);
6933 else
6934 return NULL_RTX;
6936 else if (rtx_equal_p (a, b))
6937 return const0_rtx;
6939 return NULL_RTX;
6942 rtx
6943 express_from (g1, g2)
6944 struct induction *g1, *g2;
6946 rtx mult, add;
6948 /* The value that G1 will be multiplied by must be a constant integer. Also,
6949 the only chance we have of getting a valid address is if y/x (see above
6950 for notation) is also an integer. */
6951 if (GET_CODE (g1->mult_val) == CONST_INT
6952 && GET_CODE (g2->mult_val) == CONST_INT)
6954 if (g1->mult_val == const0_rtx
6955 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6956 return NULL_RTX;
6957 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6959 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6960 mult = const1_rtx;
6961 else
6963 /* ??? Find out if one is a multiple of the other? */
6964 return NULL_RTX;
6967 add = express_from_1 (g1->add_val, g2->add_val, mult);
6968 if (add == NULL_RTX)
6970 /* Failed. If we've got a multiplication factor between G1 and G2,
6971 scale G1's addend and try again. */
6972 if (INTVAL (mult) > 1)
6974 rtx g1_add_val = g1->add_val;
6975 if (GET_CODE (g1_add_val) == MULT
6976 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6978 HOST_WIDE_INT m;
6979 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6980 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6981 XEXP (g1_add_val, 0), GEN_INT (m));
6983 else
6985 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6986 mult);
6989 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6992 if (add == NULL_RTX)
6993 return NULL_RTX;
6995 /* Form simplified final result. */
6996 if (mult == const0_rtx)
6997 return add;
6998 else if (mult == const1_rtx)
6999 mult = g1->dest_reg;
7000 else
7001 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7003 if (add == const0_rtx)
7004 return mult;
7005 else
7007 if (GET_CODE (add) == PLUS
7008 && CONSTANT_P (XEXP (add, 1)))
7010 rtx tem = XEXP (add, 1);
7011 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7012 add = tem;
7015 return gen_rtx_PLUS (g2->mode, mult, add);
7020 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7021 represented by G1. This indicates that G2 should be combined with G1 and
7022 that G2 can use (either directly or via an address expression) a register
7023 used to represent G1. */
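/* An illustrative case (assumed): if G1 is reduced into register r
   and G2's value is always G1's value plus 4, express_from yields
   (plus (reg r) (const_int 4)); when G2 is a DEST_ADDR giv and that
   expression is a valid memory address for G2's mode, it is returned
   here and G2 need not be reduced separately. */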
7025 static rtx
7026 combine_givs_p (g1, g2)
7027 struct induction *g1, *g2;
7029 rtx tem = express_from (g1, g2);
7031 /* If these givs are identical, they can be combined. We use the results
7032 of express_from because the addends are not in a canonical form, so
7033 rtx_equal_p is a weaker test. */
7034 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7035 combination to be the other way round. */
7036 if (tem == g1->dest_reg
7037 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7039 return g1->dest_reg;
7042 /* If G2 can be expressed as a function of G1 and that function is valid
7043 as an address and no more expensive than using a register for G2,
7044 the expression of G2 in terms of G1 can be used. */
7045 if (tem != NULL_RTX
7046 && g2->giv_type == DEST_ADDR
7047 && memory_address_p (g2->mem_mode, tem)
7048 /* ??? Loses, especially with -fforce-addr, where *g2->location
7049 will always be a register, and so anything more complicated
7050 gets discarded. */
7051 #if 0
7052 #ifdef ADDRESS_COST
7053 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7054 #else
7055 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7056 #endif
7057 #endif
7060 return tem;
7063 return NULL_RTX;
7066 struct combine_givs_stats
7068 int giv_number;
7069 int total_benefit;
7072 static int
7073 cmp_combine_givs_stats (xp, yp)
7074 const PTR xp;
7075 const PTR yp;
7077 const struct combine_givs_stats * const x =
7078 (const struct combine_givs_stats *) xp;
7079 const struct combine_givs_stats * const y =
7080 (const struct combine_givs_stats *) yp;
7081 int d;
7082 d = y->total_benefit - x->total_benefit;
7083 /* Stabilize the sort. */
7084 if (!d)
7085 d = x->giv_number - y->giv_number;
7086 return d;
7089 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7090 any other. If so, point SAME to the giv combined with and set NEW_REG to
7091 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7092 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7094 static void
7095 combine_givs (bl)
7096 struct iv_class *bl;
7098 /* Additional benefit to add for being combined multiple times. */
7099 const int extra_benefit = 3;
7101 struct induction *g1, *g2, **giv_array;
7102 int i, j, k, giv_count;
7103 struct combine_givs_stats *stats;
7104 rtx *can_combine;
7106 /* Count givs, because bl->giv_count is incorrect here. */
7107 giv_count = 0;
7108 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7109 if (!g1->ignore)
7110 giv_count++;
7112 giv_array
7113 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7114 i = 0;
7115 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7116 if (!g1->ignore)
7117 giv_array[i++] = g1;
7119 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
7120 bzero ((char *) stats, giv_count * sizeof (*stats));
7122 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
7123 bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));
7125 for (i = 0; i < giv_count; i++)
7127 int this_benefit;
7128 rtx single_use;
7130 g1 = giv_array[i];
7131 stats[i].giv_number = i;
7133 /* If a DEST_REG GIV is used only once, do not allow it to combine
7134 with anything, for in doing so we will gain nothing that cannot
7135 be had by simply letting the GIV with which we would have combined
7136 to be reduced on its own. The lossage shows up in particular with
7137 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7138 be seen elsewhere as well. */
7139 if (g1->giv_type == DEST_REG
7140 && (single_use = VARRAY_RTX (reg_single_usage, REGNO (g1->dest_reg)))
7141 && single_use != const0_rtx)
7142 continue;
7144 this_benefit = g1->benefit;
7145 /* Add an additional weight for zero addends. */
7146 if (g1->no_const_addval)
7147 this_benefit += 1;
7149 for (j = 0; j < giv_count; j++)
7151 rtx this_combine;
7153 g2 = giv_array[j];
7154 if (g1 != g2
7155 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7157 can_combine[i*giv_count + j] = this_combine;
7158 this_benefit += g2->benefit + extra_benefit;
7161 stats[i].total_benefit = this_benefit;
7164 /* Iterate, combining until we can't. */
7165 restart:
7166 qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
7168 if (loop_dump_stream)
7170 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7171 for (k = 0; k < giv_count; k++)
7173 g1 = giv_array[stats[k].giv_number];
7174 if (!g1->combined_with && !g1->same)
7175 fprintf (loop_dump_stream, " {%d, %d}",
7176 INSN_UID (giv_array[stats[k].giv_number]->insn),
7177 stats[k].total_benefit);
7179 putc ('\n', loop_dump_stream);
7182 for (k = 0; k < giv_count; k++)
7184 int g1_add_benefit = 0;
7186 i = stats[k].giv_number;
7187 g1 = giv_array[i];
7189 /* If it has already been combined, skip. */
7190 if (g1->combined_with || g1->same)
7191 continue;
7193 for (j = 0; j < giv_count; j++)
7195 g2 = giv_array[j];
7196 if (g1 != g2 && can_combine[i*giv_count + j]
7197 /* If it has already been combined, skip. */
7198 && ! g2->same && ! g2->combined_with)
7200 int l;
7202 g2->new_reg = can_combine[i*giv_count + j];
7203 g2->same = g1;
7204 g1->combined_with++;
7205 g1->lifetime += g2->lifetime;
7207 g1_add_benefit += g2->benefit;
7209 /* ??? The new final_[bg]iv_value code does a much better job
7210 of finding replaceable giv's, and hence this code may no
7211 longer be necessary. */
7212 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7213 g1_add_benefit -= copy_cost;
7215 /* To help optimize the next set of combinations, remove
7216 this giv from the benefits of other potential mates. */
7217 for (l = 0; l < giv_count; ++l)
7219 int m = stats[l].giv_number;
7220 if (can_combine[m*giv_count + j])
7221 stats[l].total_benefit -= g2->benefit + extra_benefit;
7224 if (loop_dump_stream)
7225 fprintf (loop_dump_stream,
7226 "giv at %d combined with giv at %d\n",
7227 INSN_UID (g2->insn), INSN_UID (g1->insn));
7231 /* To help optimize the next set of combinations, remove
7232 this giv from the benefits of other potential mates. */
7233 if (g1->combined_with)
7235 for (j = 0; j < giv_count; ++j)
7237 int m = stats[j].giv_number;
7238 if (can_combine[m*giv_count + i])
7239 stats[j].total_benefit -= g1->benefit + extra_benefit;
7242 g1->benefit += g1_add_benefit;
7244 /* We've finished with this giv, and everything it touched.
7245 Restart the combination so that the weights for the
7246 rest of the givs are properly taken into account. */
7247 /* ??? Ideally we would compact the arrays at this point, so
7248 as to not cover old ground. But sanely compacting
7249 can_combine is tricky. */
7250 goto restart;
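/* combine_givs above is an instance of a greedy fixpoint loop: sort the
   candidates by current benefit, commit the best available choice, adjust
   the benefits of everything the choice interacts with, and restart until
   a full pass commits nothing.  A minimal sketch of the control pattern
   with hypothetical data (illustrative only, not this file's API):  */
#if 0
static void
greedy_fixpoint (benefit, done, n)
     int *benefit;
     int *done;
     int n;
{
  int k, best;

 restart:
  best = -1;
  for (k = 0; k < n; k++)
    if (! done[k] && (best < 0 || benefit[k] > benefit[best]))
      best = k;
  if (best >= 0)
    {
      done[best] = 1;
      /* Committing one choice re-weights the remaining candidates,
         so start the scan over, as the goto restart above does.  */
      for (k = 0; k < n; k++)
        if (! done[k])
          benefit[k] -= 1;
      goto restart;
    }
}
#endif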
7255 struct recombine_givs_stats
7256 {
7257 int giv_number;
7258 int start_luid, end_luid;
7259 };
7261 /* Used below as comparison function for qsort. We want an ascending luid
7262 when scanning the array starting at the end, thus the arguments are
7263 used in reverse. */
7264 static int
7265 cmp_recombine_givs_stats (xp, yp)
7266 const PTR xp;
7267 const PTR yp;
7268 {
7269 const struct recombine_givs_stats * const x =
7270 (const struct recombine_givs_stats *) xp;
7271 const struct recombine_givs_stats * const y =
7272 (const struct recombine_givs_stats *) yp;
7273 int d;
7274 d = y->start_luid - x->start_luid;
7275 /* Stabilize the sort. */
7276 if (!d)
7277 d = y->giv_number - x->giv_number;
7278 return d;
7279 }
7281 /* Scan X, which is a part of INSN, for the end of life of a giv. Also
7282 look for the start of life of a giv whose start has not been seen
7283 yet, to unlock the search for the end of its life.
7284 Only consider givs that belong to BIV.
7285 Return the total number of lifetime ends that have been found. */
7286 static int
7287 find_life_end (x, stats, insn, biv)
7288 rtx x, insn, biv;
7289 struct recombine_givs_stats *stats;
7291 enum rtx_code code;
7292 const char *fmt;
7293 int i, j;
7294 int retval;
7296 code = GET_CODE (x);
7297 switch (code)
7299 case SET:
7301 rtx reg = SET_DEST (x);
7302 if (GET_CODE (reg) == REG)
7304 int regno = REGNO (reg);
7305 struct induction *v = REG_IV_INFO (regno);
7307 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7308 && ! v->ignore
7309 && v->src_reg == biv
7310 && stats[v->ix].end_luid <= 0)
7312 /* If we see a 0 here for end_luid, it means that we have
7313 scanned the entire loop without finding any use at all.
7314 We must not predicate this code on a start_luid match
7315 since that would make the test fail for givs that have
7316 been hoisted out of inner loops. */
7317 if (stats[v->ix].end_luid == 0)
7319 stats[v->ix].end_luid = stats[v->ix].start_luid;
7320 return 1 + find_life_end (SET_SRC (x), stats, insn, biv);
7322 else if (stats[v->ix].start_luid == INSN_LUID (insn))
7323 stats[v->ix].end_luid = 0;
7325 return find_life_end (SET_SRC (x), stats, insn, biv);
7327 break;
7329 case REG:
7331 int regno = REGNO (x);
7332 struct induction *v = REG_IV_INFO (regno);
7334 if (REG_IV_TYPE (regno) == GENERAL_INDUCT
7335 && ! v->ignore
7336 && v->src_reg == biv
7337 && stats[v->ix].end_luid == 0)
7339 while (INSN_UID (insn) >= max_uid_for_loop)
7340 insn = NEXT_INSN (insn);
7341 stats[v->ix].end_luid = INSN_LUID (insn);
7342 return 1;
7344 return 0;
7346 case LABEL_REF:
7347 case CONST_DOUBLE:
7348 case CONST_INT:
7349 case CONST:
7350 return 0;
7351 default:
7352 break;
7354 fmt = GET_RTX_FORMAT (code);
7355 retval = 0;
7356 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7358 if (fmt[i] == 'e')
7359 retval += find_life_end (XEXP (x, i), stats, insn, biv);
7361 else if (fmt[i] == 'E')
7362 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7363 retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv);
7365 return retval;
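/* find_life_end above follows GCC's generic rtx traversal idiom: the
   format string for an rtx code has 'e' for an expression operand and
   'E' for a vector of expressions, and everything else is a leaf.  The
   bare skeleton of such a walker, using only macros already used in
   this file (a sketch; code-specific cases go before the loop):  */
#if 0
static int
walk_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j, count = 0;

  /* Handle the rtx codes of interest here, as find_life_end does
     for SET and REG, then recurse on the operands.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      count += walk_rtx (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
        count += walk_rtx (XVECEXP (x, i, j));
  return count;
}
#endif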
7368 /* For each giv that has been combined with another, see whether
7369 we can combine it with the most recently used one instead.
7370 This tends to shorten giv lifetimes, and helps the next step:
7371 try to derive givs from other givs. */
7372 static void
7373 recombine_givs (bl, loop_start, loop_end, unroll_p)
7374 struct iv_class *bl;
7375 rtx loop_start, loop_end;
7376 int unroll_p;
7378 struct induction *v, **giv_array, *last_giv;
7379 struct recombine_givs_stats *stats;
7380 int giv_count;
7381 int i, rescan;
7382 int ends_need_computing;
7384 for (giv_count = 0, v = bl->giv; v; v = v->next_iv)
7386 if (! v->ignore)
7387 giv_count++;
7389 giv_array
7390 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7391 stats = (struct recombine_givs_stats *) alloca (giv_count * sizeof *stats);
7393 /* Initialize stats and set up the ix field for each giv in stats to name
7394 the corresponding index into stats. */
7395 for (i = 0, v = bl->giv; v; v = v->next_iv)
7397 rtx p;
7399 if (v->ignore)
7400 continue;
7401 giv_array[i] = v;
7402 stats[i].giv_number = i;
7403 /* If this giv has been hoisted out of an inner loop, use the luid of
7404 the previous insn. */
7405 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
7406 p = PREV_INSN (p);
7407 stats[i].start_luid = INSN_LUID (p);
7408 i++;
7411 qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
7413 /* Set up the ix field for each giv in stats to name
7414 the corresponding index into stats, and
7415 do the actual most-recently-used recombination. */
7416 for (last_giv = 0, i = giv_count - 1; i >= 0; i--)
7418 v = giv_array[stats[i].giv_number];
7419 v->ix = i;
7420 if (v->same)
7422 struct induction *old_same = v->same;
7423 rtx new_combine;
7425 /* combine_givs_p actually says if we can make this transformation.
7426 The other tests are here only to avoid keeping a giv alive
7427 that could otherwise be eliminated. */
7428 if (last_giv
7429 && ((old_same->maybe_dead && ! old_same->combined_with)
7430 || ! last_giv->maybe_dead
7431 || last_giv->combined_with)
7432 && (new_combine = combine_givs_p (last_giv, v)))
7434 old_same->combined_with--;
7435 v->new_reg = new_combine;
7436 v->same = last_giv;
7437 last_giv->combined_with++;
7438 /* No need to update lifetimes / benefits here since we have
7439 already decided what to reduce. */
7441 if (loop_dump_stream)
7443 fprintf (loop_dump_stream,
7444 "giv at %d recombined with giv at %d as ",
7445 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7446 print_rtl (loop_dump_stream, v->new_reg);
7447 putc ('\n', loop_dump_stream);
7449 continue;
7451 v = v->same;
7453 else if (v->giv_type != DEST_REG)
7454 continue;
7455 if (! last_giv
7456 || (last_giv->maybe_dead && ! last_giv->combined_with)
7457 || ! v->maybe_dead
7458 || v->combined_with)
7459 last_giv = v;
7462 ends_need_computing = 0;
7463 /* For each DEST_REG giv, compute lifetime starts, and try to compute
7464 lifetime ends from regscan info. */
7465 for (i = giv_count - 1; i >= 0; i--)
7467 v = giv_array[stats[i].giv_number];
7468 if (v->ignore)
7469 continue;
7470 if (v->giv_type == DEST_ADDR)
7472 /* Loop unrolling of an inner loop can even create new DEST_REG
7473 givs. */
7474 rtx p;
7475 for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; )
7476 p = PREV_INSN (p);
7477 stats[i].start_luid = stats[i].end_luid = INSN_LUID (p);
7478 if (p != v->insn)
7479 stats[i].end_luid++;
7481 else /* v->giv_type == DEST_REG */
7483 if (v->last_use)
7485 stats[i].start_luid = INSN_LUID (v->insn);
7486 stats[i].end_luid = INSN_LUID (v->last_use);
7488 else if (INSN_UID (v->insn) >= max_uid_for_loop)
7490 rtx p;
7491 /* This insn has been created by loop optimization on an inner
7492 loop. We don't have a proper start_luid that will match
7493 when we see the first set. But we do know that there will
7494 be no use before the set, so we can set end_luid to 0 so that
7495 we'll start looking for the last use right away. */
7496 for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; )
7497 p = PREV_INSN (p);
7498 stats[i].start_luid = INSN_LUID (p);
7499 stats[i].end_luid = 0;
7500 ends_need_computing++;
7502 else
7504 int regno = REGNO (v->dest_reg);
7505 int count = VARRAY_INT (n_times_set, regno) - 1;
7506 rtx p = v->insn;
7508 /* Find the first insn that sets the giv, so that we can verify
7509 if this giv's lifetime wraps around the loop. We also need
7510 the luid of the first setting insn in order to detect the
7511 last use properly. */
7512 while (count)
7514 p = prev_nonnote_insn (p);
7515 if (reg_set_p (v->dest_reg, p))
7516 count--;
7519 stats[i].start_luid = INSN_LUID (p);
7520 if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)])
7522 stats[i].end_luid = -1;
7523 ends_need_computing++;
7525 else
7527 stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)];
7528 if (stats[i].end_luid > INSN_LUID (loop_end))
7530 stats[i].end_luid = -1;
7531 ends_need_computing++;
7538 /* If the regscan information was inconclusive for one or more DEST_REG
7539 givs, scan all the insns in the loop to find the lifetime ends. */
7540 if (ends_need_computing)
7542 rtx biv = bl->biv->src_reg;
7543 rtx p = loop_end;
7547 if (p == loop_start)
7548 p = loop_end;
7549 p = PREV_INSN (p);
7550 if (GET_RTX_CLASS (GET_CODE (p)) != 'i')
7551 continue;
7552 ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv);
7554 while (ends_need_computing);
7557 /* Set start_luid back to the last insn that sets the giv. This allows
7558 more combinations. */
7559 for (i = giv_count - 1; i >= 0; i--)
7561 v = giv_array[stats[i].giv_number];
7562 if (v->ignore)
7563 continue;
7564 if (INSN_UID (v->insn) < max_uid_for_loop)
7565 stats[i].start_luid = INSN_LUID (v->insn);
7568 /* Now adjust lifetime ends by taking combined givs into account. */
7569 for (i = giv_count - 1; i >= 0; i--)
7571 unsigned luid;
7572 int j;
7574 v = giv_array[stats[i].giv_number];
7575 if (v->ignore)
7576 continue;
7577 if (v->same && ! v->same->ignore)
7579 j = v->same->ix;
7580 luid = stats[i].start_luid;
7581 /* Use unsigned arithmetic to model loop wrap-around. */
7582 if (luid - stats[j].start_luid
7583 > (unsigned) stats[j].end_luid - stats[j].start_luid)
7584 stats[j].end_luid = luid;
7588 qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats);
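/* The lifetime comparisons above and below use a single unsigned
   subtraction to test interval membership even when a lifetime wraps
   around the end of the luid sequence: X is inside [START, END] iff
   (unsigned) (X - START) <= (unsigned) (END - START).  Self-contained
   sketch of the idiom (illustrative, with full-width unsigned
   wrap-around):  */
#if 0
static int
in_wrapped_range (x, start, end)
     unsigned int x, start, end;
{
  return x - start <= end - start;
}

/* E.g. with start = 250 and end = 5 the interval wraps: x = 2 is
   inside, since 2 - 250 wraps to a value no greater than 5 - 250
   wrapped, while x = 100 is outside.  */
#endif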
7590 /* Try to derive DEST_REG givs from previous DEST_REG givs with the
7591 same mult_val and non-overlapping lifetime. This reduces register
7592 pressure.
7593 Once we find a DEST_REG giv that is suitable to derive others from,
7594 we set last_giv to this giv, and try to derive as many other DEST_REG
7595 givs from it without joining overlapping lifetimes. If we then
7596 encounter a DEST_REG giv that we can't derive, we set rescan to the
7597 index for this giv (unless rescan is already set).
7598 When we are finished with the current LAST_GIV (i.e. the inner loop
7599 terminates), we start again with rescan, which then becomes the new
7600 LAST_GIV. */
7601 for (i = giv_count - 1; i >= 0; i = rescan)
7603 int life_start, life_end;
7605 for (last_giv = 0, rescan = -1; i >= 0; i--)
7607 rtx sum;
7609 v = giv_array[stats[i].giv_number];
7610 if (v->giv_type != DEST_REG || v->derived_from || v->same)
7611 continue;
7612 if (! last_giv)
7614 /* Don't use a giv that's likely to be dead to derive
7615 others - that would be likely to keep that giv alive. */
7616 if (! v->maybe_dead || v->combined_with)
7618 last_giv = v;
7619 life_start = stats[i].start_luid;
7620 life_end = stats[i].end_luid;
7622 continue;
7624 /* Use unsigned arithmetic to model loop wrap around. */
7625 if (((unsigned) stats[i].start_luid - life_start
7626 >= (unsigned) life_end - life_start)
7627 && ((unsigned) stats[i].end_luid - life_start
7628 > (unsigned) life_end - life_start)
7629 /* Check that the giv insn we're about to use for deriving
7630 precedes all uses of that giv. Note that initializing the
7631 derived giv would defeat the purpose of reducing register
7632 pressure.
7633 ??? We could arrange to move the insn. */
7634 && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start)
7635 > (unsigned) stats[i].start_luid - INSN_LUID (loop_start))
7636 && rtx_equal_p (last_giv->mult_val, v->mult_val)
7637 /* ??? Could handle libcalls, but would need more logic. */
7638 && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX)
7639 /* We would really like to know if for any giv that v
7640 is combined with, v->insn or any intervening biv increment
7641 dominates that combined giv. However, we
7642 don't have this detailed control flow information.
7643 N.B. since last_giv will be reduced, it is valid
7644 anywhere in the loop, so we don't need to check the
7645 validity of last_giv.
7646 We rely here on the fact that v->always_executed implies that
7647 there is no jump to someplace else in the loop before the
7648 giv insn, and hence any insn that is executed before the
7649 giv insn in the loop will have a lower luid. */
7650 && (v->always_executed || ! v->combined_with)
7651 && (sum = express_from (last_giv, v))
7652 /* Make sure we don't make the add more expensive. ADD_COST
7653 doesn't take different costs of registers and constants into
7654 account, so compare the cost of the actual SET_SRCs. */
7655 && (rtx_cost (sum, SET)
7656 <= rtx_cost (SET_SRC (single_set (v->insn)), SET))
7657 /* ??? unroll can't understand anything but reg + const_int
7658 sums. It would be cleaner to fix unroll. */
7659 && ((GET_CODE (sum) == PLUS
7660 && GET_CODE (XEXP (sum, 0)) == REG
7661 && GET_CODE (XEXP (sum, 1)) == CONST_INT)
7662 || ! unroll_p)
7663 && validate_change (v->insn, &PATTERN (v->insn),
7664 gen_rtx_SET (VOIDmode, v->dest_reg, sum), 0))
7666 v->derived_from = last_giv;
7667 life_end = stats[i].end_luid;
7669 if (loop_dump_stream)
7671 fprintf (loop_dump_stream,
7672 "giv at %d derived from %d as ",
7673 INSN_UID (v->insn), INSN_UID (last_giv->insn));
7674 print_rtl (loop_dump_stream, sum);
7675 putc ('\n', loop_dump_stream);
7678 else if (rescan < 0)
7679 rescan = i;
7684 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
7686 void
7687 emit_iv_add_mult (b, m, a, reg, insert_before)
7688 rtx b; /* initial value of basic induction variable */
7689 rtx m; /* multiplicative constant */
7690 rtx a; /* additive constant */
7691 rtx reg; /* destination register */
7692 rtx insert_before;
7694 rtx seq;
7695 rtx result;
7697 /* Prevent unexpected sharing of these rtx. */
7698 a = copy_rtx (a);
7699 b = copy_rtx (b);
7701 /* Increase the lifetime of any invariants moved further in code. */
7702 update_reg_last_use (a, insert_before);
7703 update_reg_last_use (b, insert_before);
7704 update_reg_last_use (m, insert_before);
7706 start_sequence ();
7707 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
7708 if (reg != result)
7709 emit_move_insn (reg, result);
7710 seq = gen_sequence ();
7711 end_sequence ();
7713 emit_insn_before (seq, insert_before);
7715 /* It is entirely possible that the expansion created lots of new
7716 registers. Iterate over the sequence we just created and
7717 record them all. */
7719 if (GET_CODE (seq) == SEQUENCE)
7721 int i;
7722 for (i = 0; i < XVECLEN (seq, 0); ++i)
7724 rtx set = single_set (XVECEXP (seq, 0, i));
7725 if (set && GET_CODE (SET_DEST (set)) == REG)
7726 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7729 else if (GET_CODE (seq) == SET
7730 && GET_CODE (SET_DEST (seq)) == REG)
7731 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
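/* The function above follows the standard sequence protocol: open a
   detached sequence, expand into it, close it, then splice the whole
   thing in at the insertion point.  General shape of the idiom, using
   only routines already used here; compute_something is a hypothetical
   placeholder for any expander that returns its result rtx:  */
#if 0
static void
emit_computation_before (reg, insert_before)
     rtx reg, insert_before;
{
  rtx seq, result;

  start_sequence ();
  result = compute_something (reg);     /* hypothetical expander */
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  emit_insn_before (seq, insert_before);
}
#endif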
7734 /* Test whether A * B can be computed without
7735 an actual multiply insn. Value is 1 if so. */
7737 static int
7738 product_cheap_p (a, b)
7739 rtx a;
7740 rtx b;
7742 int i;
7743 rtx tmp;
7744 struct obstack *old_rtl_obstack = rtl_obstack;
7745 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
7746 int win = 1;
7748 /* If only one is constant, make it B. */
7749 if (GET_CODE (a) == CONST_INT)
7750 tmp = a, a = b, b = tmp;
7752 /* If the first is constant, both are constant, so we don't need a multiply. */
7753 if (GET_CODE (a) == CONST_INT)
7754 return 1;
7756 /* If the second is not constant, neither is constant, so we would need a multiply. */
7757 if (GET_CODE (b) != CONST_INT)
7758 return 0;
7760 /* One operand is constant, so we might not need a multiply insn. Generate
7761 the code for the multiply and see whether a call, a multiply, or a long
7762 sequence of insns is generated. */
7764 rtl_obstack = &temp_obstack;
7765 start_sequence ();
7766 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
7767 tmp = gen_sequence ();
7768 end_sequence ();
7770 if (GET_CODE (tmp) == SEQUENCE)
7772 if (XVEC (tmp, 0) == 0)
7773 win = 1;
7774 else if (XVECLEN (tmp, 0) > 3)
7775 win = 0;
7776 else
7777 for (i = 0; i < XVECLEN (tmp, 0); i++)
7779 rtx insn = XVECEXP (tmp, 0, i);
7781 if (GET_CODE (insn) != INSN
7782 || (GET_CODE (PATTERN (insn)) == SET
7783 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7784 || (GET_CODE (PATTERN (insn)) == PARALLEL
7785 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7786 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7788 win = 0;
7789 break;
7793 else if (GET_CODE (tmp) == SET
7794 && GET_CODE (SET_SRC (tmp)) == MULT)
7795 win = 0;
7796 else if (GET_CODE (tmp) == PARALLEL
7797 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7798 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7799 win = 0;
7801 /* Free any storage we obtained in generating this multiply and restore rtl
7802 allocation to its normal obstack. */
7803 obstack_free (&temp_obstack, storage);
7804 rtl_obstack = old_rtl_obstack;
7806 return win;
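/* The reason a multiply by a suitable constant can be cheap is that
   expand_mult may synthesize it from shifts and adds.  A source-level
   illustration of the kind of expansion product_cheap_p hopes to see
   (illustrative only):  */
#if 0
static unsigned long
times10 (x)
     unsigned long x;
{
  /* x * 10 == x * 8 + x * 2 == (x << 3) + (x << 1).  */
  return (x << 3) + (x << 1);
}
#endif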
7809 /* Check to see if the loop can be terminated by a "decrement and branch until
7810 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7811 Also try reversing an increment loop to a decrement loop
7812 to see if the optimization can be performed.
7813 Value is nonzero if optimization was performed. */
7815 /* This is useful even if the architecture doesn't have such an insn,
7816 because it might change a loop which increments from 0 to n to a loop
7817 which decrements from n to 0. A loop that decrements to zero is usually
7818 faster than one that increments from zero. */
7820 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7821 such as approx_final_value, biv_total_increment, loop_iterations, and
7822 final_[bg]iv_value. */
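/* Source-level picture of the reversal this function performs when the
   counter has no use except counting (hypothetical example):

        for (i = 0; i < n; i++)         for (i = n; i > 0; i--)
          body ();                ==>     body ();

   The reversed loop exits on a comparison against zero, which most
   targets test more cheaply, and which a decrement-and-branch insn
   can implement in a single instruction.  */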
7824 static int
7825 check_dbra_loop (loop_end, insn_count, loop_start, loop_info)
7826 rtx loop_end;
7827 int insn_count;
7828 rtx loop_start;
7829 struct loop_info *loop_info;
7831 struct iv_class *bl;
7832 rtx reg;
7833 rtx jump_label;
7834 rtx final_value;
7835 rtx start_value;
7836 rtx new_add_val;
7837 rtx comparison;
7838 rtx before_comparison;
7839 rtx p;
7840 rtx jump;
7841 rtx first_compare;
7842 int compare_and_branch;
7844 /* If last insn is a conditional branch, and the insn before tests a
7845 register value, try to optimize it. Otherwise, we can't do anything. */
7847 jump = PREV_INSN (loop_end);
7848 comparison = get_condition_for_loop (jump);
7849 if (comparison == 0)
7850 return 0;
7852 /* Try to compute whether the compare/branch at the loop end is one or
7853 two instructions. */
7854 get_condition (jump, &first_compare);
7855 if (first_compare == jump)
7856 compare_and_branch = 1;
7857 else if (first_compare == prev_nonnote_insn (jump))
7858 compare_and_branch = 2;
7859 else
7860 return 0;
7862 /* Check all of the bivs to see if the compare uses one of them.
7863 Skip biv's set more than once because we can't guarantee that
7864 it will be zero on the last iteration. Also skip if the biv is
7865 used between its update and the test insn. */
7867 for (bl = loop_iv_list; bl; bl = bl->next)
7869 if (bl->biv_count == 1
7870 && bl->biv->dest_reg == XEXP (comparison, 0)
7871 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7872 first_compare))
7873 break;
7876 if (! bl)
7877 return 0;
7879 /* Look for the case where the basic induction variable is always
7880 nonnegative, and equals zero on the last iteration.
7881 In this case, add a reg_note REG_NONNEG, which allows the
7882 m68k DBRA instruction to be used. */
7884 if (((GET_CODE (comparison) == GT
7885 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7886 && INTVAL (XEXP (comparison, 1)) == -1)
7887 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7888 && GET_CODE (bl->biv->add_val) == CONST_INT
7889 && INTVAL (bl->biv->add_val) < 0)
7891 /* The initial value must be greater than 0, and
7892 init_val % -dec_value == 0 must hold to ensure that the biv equals
7893 zero on the last iteration. */
7895 if (GET_CODE (bl->initial_value) == CONST_INT
7896 && INTVAL (bl->initial_value) > 0
7897 && (INTVAL (bl->initial_value)
7898 % (-INTVAL (bl->biv->add_val))) == 0)
7900 /* register always nonnegative, add REG_NOTE to branch */
7901 REG_NOTES (PREV_INSN (loop_end))
7902 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7903 REG_NOTES (PREV_INSN (loop_end)));
7904 bl->nonneg = 1;
7906 return 1;
7909 /* If the decrement is 1 and the value was tested as >= 0 before
7910 the loop, then we can safely optimize. */
7911 for (p = loop_start; p; p = PREV_INSN (p))
7913 if (GET_CODE (p) == CODE_LABEL)
7914 break;
7915 if (GET_CODE (p) != JUMP_INSN)
7916 continue;
7918 before_comparison = get_condition_for_loop (p);
7919 if (before_comparison
7920 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7921 && GET_CODE (before_comparison) == LT
7922 && XEXP (before_comparison, 1) == const0_rtx
7923 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7924 && INTVAL (bl->biv->add_val) == -1)
7926 REG_NOTES (PREV_INSN (loop_end))
7927 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7928 REG_NOTES (PREV_INSN (loop_end)));
7929 bl->nonneg = 1;
7931 return 1;
7935 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7936 && INTVAL (bl->biv->add_val) > 0)
7938 /* Try to change inc to dec, so can apply above optimization. */
7939 /* Can do this if:
7940 all registers modified are induction variables or invariant,
7941 all memory references have non-overlapping addresses
7942 (obviously true if only one write)
7943 allow 2 insns for the compare/jump at the end of the loop. */
7944 /* Also, we must avoid any instructions which use both the reversed
7945 biv and another biv. Such instructions will fail if the loop is
7946 reversed. We meet this condition by requiring that either
7947 no_use_except_counting is true, or else that there is only
7948 one biv. */
7949 int num_nonfixed_reads = 0;
7950 /* 1 if the iteration var is used only to count iterations. */
7951 int no_use_except_counting = 0;
7952 /* 1 if the loop has no memory store, or it has a single memory store
7953 which is reversible. */
7954 int reversible_mem_store = 1;
7956 if (bl->giv_count == 0
7957 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
7959 rtx bivreg = regno_reg_rtx[bl->regno];
7961 /* If there are no givs for this biv, and the only exit is the
7962 fall through at the end of the loop, then
7963 see if perhaps there are no uses except to count. */
7964 no_use_except_counting = 1;
7965 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7966 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
7968 rtx set = single_set (p);
7970 if (set && GET_CODE (SET_DEST (set)) == REG
7971 && REGNO (SET_DEST (set)) == bl->regno)
7972 /* An insn that sets the biv is okay. */
7974 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7975 || p == prev_nonnote_insn (loop_end))
7976 && reg_mentioned_p (bivreg, PATTERN (p)))
7978 /* If either of these insns uses the biv and sets a pseudo
7979 that has more than one usage, then the biv has uses
7980 other than counting since it's used to derive a value
7981 that is used more than one time. */
7982 int note_set_pseudo_multiple_uses_retval = 0;
7983 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7984 &note_set_pseudo_multiple_uses_retval);
7985 if (note_set_pseudo_multiple_uses_retval)
7987 no_use_except_counting = 0;
7988 break;
7991 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7993 no_use_except_counting = 0;
7994 break;
7999 if (no_use_except_counting)
8000 ; /* no need to worry about MEMs. */
8001 else if (num_mem_sets <= 1)
8003 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8004 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
8005 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
8007 /* If the loop has a single store, and the destination address is
8008 invariant, then we can't reverse the loop, because this address
8009 might then have the wrong value at loop exit.
8010 This would work if the source were invariant also; however, in that
8011 case, the insn should have been moved out of the loop. */
8013 if (num_mem_sets == 1)
8015 struct induction *v;
8017 reversible_mem_store
8018 = (! unknown_address_altered
8019 && ! unknown_constant_address_altered
8020 && ! invariant_p (XEXP (XEXP (loop_store_mems, 0), 0)));
8022 /* If the store depends on a register that is set after the
8023 store, it depends on the initial value, and is thus not
8024 reversible. */
8025 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8027 if (v->giv_type == DEST_REG
8028 && reg_mentioned_p (v->dest_reg,
8029 XEXP (loop_store_mems, 0))
8030 && loop_insn_first_p (first_loop_store_insn, v->insn))
8031 reversible_mem_store = 0;
8035 else
8036 return 0;
8038 /* This code only acts for innermost loops. Also it simplifies
8039 the memory address check by only reversing loops with
8040 zero or one memory access.
8041 Two memory accesses could involve parts of the same array,
8042 and that can't be reversed.
8043 If the biv is used only for counting, then we don't need to worry
8044 about all these things. */
8046 if ((num_nonfixed_reads <= 1
8047 && ! loop_info->has_call
8048 && ! loop_info->has_volatile
8049 && reversible_mem_store
8050 && (bl->giv_count + bl->biv_count + num_mem_sets
8051 + num_movables + compare_and_branch == insn_count)
8052 && (bl == loop_iv_list && bl->next == 0))
8053 || no_use_except_counting)
8055 rtx tem;
8057 /* Loop can be reversed. */
8058 if (loop_dump_stream)
8059 fprintf (loop_dump_stream, "Can reverse loop\n");
8061 /* Now check other conditions:
8063 The increment must be a constant, as must the initial value,
8064 and the comparison code must be LT.
8066 This test can probably be improved since +/- 1 in the constant
8067 can be obtained by changing LT to LE and vice versa; this is
8068 confusing. */
8070 if (comparison
8071 /* for constants, LE gets turned into LT */
8072 && (GET_CODE (comparison) == LT
8073 || (GET_CODE (comparison) == LE
8074 && no_use_except_counting)))
8076 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8077 rtx initial_value, comparison_value;
8078 int nonneg = 0;
8079 enum rtx_code cmp_code;
8080 int comparison_const_width;
8081 unsigned HOST_WIDE_INT comparison_sign_mask;
8083 add_val = INTVAL (bl->biv->add_val);
8084 comparison_value = XEXP (comparison, 1);
8085 if (GET_MODE (comparison_value) == VOIDmode)
8086 comparison_const_width
8087 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8088 else
8089 comparison_const_width
8090 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8091 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8092 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8093 comparison_sign_mask
8094 = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
8096 /* If the comparison value is not a loop invariant, then we
8097 cannot reverse this loop.
8099 ??? If the insns which initialize the comparison value as
8100 a whole compute an invariant result, then we could move
8101 them out of the loop and proceed with loop reversal. */
8102 if (!invariant_p (comparison_value))
8103 return 0;
8105 if (GET_CODE (comparison_value) == CONST_INT)
8106 comparison_val = INTVAL (comparison_value);
8107 initial_value = bl->initial_value;
8109 /* Normalize the initial value if it is an integer and
8110 has no other use except as a counter. This will allow
8111 a few more loops to be reversed. */
8112 if (no_use_except_counting
8113 && GET_CODE (comparison_value) == CONST_INT
8114 && GET_CODE (initial_value) == CONST_INT)
8116 comparison_val = comparison_val - INTVAL (bl->initial_value);
8117 /* The code below requires comparison_val to be a multiple
8118 of add_val in order to do the loop reversal, so
8119 round up comparison_val to a multiple of add_val.
8120 Since comparison_value is constant, we know that the
8121 current comparison code is LT. */
8122 comparison_val = comparison_val + add_val - 1;
8123 comparison_val
8124 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8125 /* We postpone overflow checks for COMPARISON_VAL here;
8126 even if there is an overflow, we might still be able to
8127 reverse the loop, if converting the loop exit test to
8128 NE is possible. */
8129 initial_value = const0_rtx;
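/* Worked example with illustrative numbers (not from the source):
   initial value 3, add_val 4, exit test (LT x 10).  comparison_val
   becomes 10 - 3 = 7; rounding up to a multiple of add_val gives
   7 + 4 - 1 = 10, then 10 - (10 % 4) = 8.  The normalized loop runs
   from 0 below 8 in steps of 4, i.e. 8 / 4 = 2 iterations, matching
   the original sequence i = 3, 7.  */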
8132 /* First check if we can do a vanilla loop reversal. */
8133 if (initial_value == const0_rtx
8134 /* If we have a decrement_and_branch_on_count,
8135 prefer the NE test, since this will allow that
8136 instruction to be generated. Note that we must
8137 use a vanilla loop reversal if the biv is used to
8138 calculate a giv or has a non-counting use. */
8139 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8140 && defined (HAVE_decrement_and_branch_on_count)
8141 && (! (add_val == 1 && loop_info->vtop
8142 && (bl->biv_count == 0
8143 || no_use_except_counting)))
8144 #endif
8145 && GET_CODE (comparison_value) == CONST_INT
8146 /* Now do postponed overflow checks on COMPARISON_VAL. */
8147 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8148 & comparison_sign_mask))
8150 /* Register will always be nonnegative, with value
8151 0 on last iteration */
8152 add_adjust = add_val;
8153 nonneg = 1;
8154 cmp_code = GE;
8156 else if (add_val == 1 && loop_info->vtop
8157 && (bl->biv_count == 0
8158 || no_use_except_counting))
8160 add_adjust = 0;
8161 cmp_code = NE;
8163 else
8164 return 0;
8166 if (GET_CODE (comparison) == LE)
8167 add_adjust -= add_val;
8169 /* If the initial value is not zero, or if the comparison
8170 value is not an exact multiple of the increment, then we
8171 cannot reverse this loop. */
8172 if (initial_value == const0_rtx
8173 && GET_CODE (comparison_value) == CONST_INT)
8175 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8176 return 0;
8178 else
8180 if (! no_use_except_counting || add_val != 1)
8181 return 0;
8184 final_value = comparison_value;
8186 /* Reset these in case we normalized the initial value
8187 and comparison value above. */
8188 if (GET_CODE (comparison_value) == CONST_INT
8189 && GET_CODE (initial_value) == CONST_INT)
8191 comparison_value = GEN_INT (comparison_val);
8192 final_value
8193 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8195 bl->initial_value = initial_value;
8197 /* Save some info needed to produce the new insns. */
8198 reg = bl->biv->dest_reg;
8199 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
8200 if (jump_label == pc_rtx)
8201 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
8202 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
8204 /* Set start_value; if this is not a CONST_INT, we need
8205 to generate a SUB.
8206 Initialize biv to start_value before loop start.
8207 The old initializing insn will be deleted as a
8208 dead store by flow.c. */
8209 if (initial_value == const0_rtx
8210 && GET_CODE (comparison_value) == CONST_INT)
8212 start_value = GEN_INT (comparison_val - add_adjust);
8213 emit_insn_before (gen_move_insn (reg, start_value),
8214 loop_start);
8216 else if (GET_CODE (initial_value) == CONST_INT)
8218 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8219 enum machine_mode mode = GET_MODE (reg);
8220 enum insn_code icode
8221 = add_optab->handlers[(int) mode].insn_code;
8223 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8224 || ! ((*insn_data[icode].operand[1].predicate)
8225 (comparison_value, mode))
8226 || ! ((*insn_data[icode].operand[2].predicate)
8227 (offset, mode)))
8228 return 0;
8229 start_value
8230 = gen_rtx_PLUS (mode, comparison_value, offset);
8231 emit_insn_before ((GEN_FCN (icode)
8232 (reg, comparison_value, offset)),
8233 loop_start);
8234 if (GET_CODE (comparison) == LE)
8235 final_value = gen_rtx_PLUS (mode, comparison_value,
8236 GEN_INT (add_val));
8238 else if (! add_adjust)
8240 enum machine_mode mode = GET_MODE (reg);
8241 enum insn_code icode
8242 = sub_optab->handlers[(int) mode].insn_code;
8243 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
8244 || ! ((*insn_data[icode].operand[1].predicate)
8245 (comparison_value, mode))
8246 || ! ((*insn_data[icode].operand[2].predicate)
8247 (initial_value, mode)))
8248 return 0;
8249 start_value
8250 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8251 emit_insn_before ((GEN_FCN (icode)
8252 (reg, comparison_value, initial_value)),
8253 loop_start);
8255 else
8256 /* We could handle the other cases too, but it'll be
8257 better to have a testcase first. */
8258 return 0;
8260 /* We may not have a single insn which can increment a reg, so
8261 create a sequence to hold all the insns from expand_inc. */
8262 start_sequence ();
8263 expand_inc (reg, new_add_val);
8264 tem = gen_sequence ();
8265 end_sequence ();
8267 p = emit_insn_before (tem, bl->biv->insn);
8268 delete_insn (bl->biv->insn);
8270 /* Update biv info to reflect its new status. */
8271 bl->biv->insn = p;
8272 bl->initial_value = start_value;
8273 bl->biv->add_val = new_add_val;
8275 /* Update loop info. */
8276 loop_info->initial_value = reg;
8277 loop_info->initial_equiv_value = reg;
8278 loop_info->final_value = const0_rtx;
8279 loop_info->final_equiv_value = const0_rtx;
8280 loop_info->comparison_value = const0_rtx;
8281 loop_info->comparison_code = cmp_code;
8282 loop_info->increment = new_add_val;
8284 /* Inc LABEL_NUSES so that delete_insn will
8285 not delete the label. */
8286 LABEL_NUSES (XEXP (jump_label, 0)) ++;
8288 /* Emit an insn after the end of the loop to set the biv's
8289 proper exit value if it is used anywhere outside the loop. */
8290 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8291 || ! bl->init_insn
8292 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8293 emit_insn_after (gen_move_insn (reg, final_value),
8294 loop_end);
8296 /* Delete compare/branch at end of loop. */
8297 delete_insn (PREV_INSN (loop_end));
8298 if (compare_and_branch == 2)
8299 delete_insn (first_compare);
8301 /* Add new compare/branch insn at end of loop. */
8302 start_sequence ();
8303 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8304 GET_MODE (reg), 0, 0,
8305 XEXP (jump_label, 0));
8306 tem = gen_sequence ();
8307 end_sequence ();
8308 emit_jump_insn_before (tem, loop_end);
8310 for (tem = PREV_INSN (loop_end);
8311 tem && GET_CODE (tem) != JUMP_INSN;
8312 tem = PREV_INSN (tem))
8315 if (tem)
8316 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8318 if (nonneg)
8320 if (tem)
8322 /* Increment of LABEL_NUSES done above. */
8323 /* Register is now always nonnegative,
8324 so add REG_NONNEG note to the branch. */
8325 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
8326 REG_NOTES (tem));
8328 bl->nonneg = 1;
8331 /* No insn may reference both the reversed and another biv or it
8332 will fail (see comment near the top of the loop reversal
8333 code).
8334 Earlier on, we have verified that the biv has no use except
8335 counting, or it is the only biv in this function.
8336 However, the code that computes no_use_except_counting does
8337 not verify reg notes. It's possible to have an insn that
8338 references another biv, and has a REG_EQUAL note with an
8339 expression based on the reversed biv. To avoid this case,
8340 remove all REG_EQUAL notes based on the reversed biv
8341 here. */
8342 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8343 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
8345 rtx *pnote;
8346 rtx set = single_set (p);
8347 /* If this is a set of a GIV based on the reversed biv, any
8348 REG_EQUAL notes should still be correct. */
8349 if (! set
8350 || GET_CODE (SET_DEST (set)) != REG
8351 || (size_t) REGNO (SET_DEST (set)) >= reg_iv_type->num_elements
8352 || REG_IV_TYPE (REGNO (SET_DEST (set))) != GENERAL_INDUCT
8353 || REG_IV_INFO (REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8354 for (pnote = &REG_NOTES (p); *pnote;)
8356 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8357 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8358 XEXP (*pnote, 0)))
8359 *pnote = XEXP (*pnote, 1);
8360 else
8361 pnote = &XEXP (*pnote, 1);
8365 /* Mark that this biv has been reversed. Each giv which depends
8366 on this biv, and which is also live past the end of the loop
8367 will have to be fixed up. */
8369 bl->reversed = 1;
8371 if (loop_dump_stream)
8373 fprintf (loop_dump_stream, "Reversed loop");
8374 if (bl->nonneg)
8375 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8376 else
8377 fprintf (loop_dump_stream, "\n");
8380 return 1;
8385 return 0;
8388 /* Verify whether the biv BL appears to be eliminable,
8389 based on the insns in the loop that refer to it.
8390 LOOP_START is the first insn of the loop, and END is the end insn.
8392 If ELIMINATE_P is non-zero, actually do the elimination.
8394 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8395 determine whether invariant insns should be placed inside or at the
8396 start of the loop. */
8398 static int
8399 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
8400 struct iv_class *bl;
8401 rtx loop_start;
8402 rtx end;
8403 int eliminate_p;
8404 int threshold, insn_count;
8406 rtx reg = bl->biv->dest_reg;
8407 rtx p;
8409 /* Scan all insns in the loop, stopping if we find one that uses the
8410 biv in a way that we cannot eliminate. */
8412 for (p = loop_start; p != end; p = NEXT_INSN (p))
8414 enum rtx_code code = GET_CODE (p);
8415 rtx where = threshold >= insn_count ? loop_start : p;
8417 /* If this is a libcall that sets a giv, skip ahead to its end. */
8418 if (GET_RTX_CLASS (code) == 'i')
8420 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8422 if (note)
8424 rtx last = XEXP (note, 0);
8425 rtx set = single_set (last);
8427 if (set && GET_CODE (SET_DEST (set)) == REG)
8429 int regno = REGNO (SET_DEST (set));
8431 if (regno < max_reg_before_loop
8432 && REG_IV_TYPE (regno) == GENERAL_INDUCT
8433 && REG_IV_INFO (regno)->src_reg == bl->biv->src_reg)
8434 p = last;
8438 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8439 && reg_mentioned_p (reg, PATTERN (p))
8440 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
8442 if (loop_dump_stream)
8443 fprintf (loop_dump_stream,
8444 "Cannot eliminate biv %d: biv used in insn %d.\n",
8445 bl->regno, INSN_UID (p));
8446 break;
8450 if (p == end)
8452 if (loop_dump_stream)
8453 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8454 bl->regno, eliminate_p ? "was" : "can be");
8455 return 1;
8458 return 0;
8461 /* INSN and REFERENCE are instructions in the same insn chain.
8462 Return non-zero if INSN is first. */
8464 int
8465 loop_insn_first_p (insn, reference)
8466 rtx insn, reference;
8468 rtx p, q;
8470 for (p = insn, q = reference; ;)
8472 /* Start with test for not first so that INSN == REFERENCE yields not
8473 first. */
8474 if (q == insn || ! p)
8475 return 0;
8476 if (p == reference || ! q)
8477 return 1;
8479 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8480 previous insn, hence the <= comparison below does not work if
8481 P is a note. */
8482 if (INSN_UID (p) < max_uid_for_loop
8483 && INSN_UID (q) < max_uid_for_loop
8484 && GET_CODE (p) != NOTE)
8485 return INSN_LUID (p) <= INSN_LUID (q);
8487 if (INSN_UID (p) >= max_uid_for_loop
8488 || GET_CODE (p) == NOTE)
8489 p = NEXT_INSN (p);
8490 if (INSN_UID (q) >= max_uid_for_loop)
8491 q = NEXT_INSN (q);
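/* The fallback above is essentially the simultaneous-walk trick for
   ordering two nodes of a singly linked list without position
   information: advance from both at once, and whichever walker reaches
   the other node (or the end of the chain) first decides.  A
   self-contained sketch with a hypothetical node type:  */
#if 0
struct node { struct node *next; };

static int
node_first_p (a, b)
     struct node *a, *b;
{
  struct node *p = a, *q = b;

  for (;;)
    {
      if (q == a || p == 0)
        return 0;               /* B comes first, or A fell off the end.  */
      if (p == b || q == 0)
        return 1;               /* A comes first, or B fell off the end.  */
      p = p->next;
      q = q->next;
    }
}
#endif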
8495 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8496 the offset that we have to take into account due to auto-increment /
8497 giv derivation is zero. */
8498 static int
8499 biv_elimination_giv_has_0_offset (biv, giv, insn)
8500 struct induction *biv, *giv;
8501 rtx insn;
8503 /* If the giv V had the auto-inc address optimization applied
8504 to it, and INSN occurs between the giv insn and the biv
8505 insn, then we'd have to adjust the value used here.
8506 This is rare, so we don't bother to make this possible. */
8507 if (giv->auto_inc_opt
8508 && ((loop_insn_first_p (giv->insn, insn)
8509 && loop_insn_first_p (insn, biv->insn))
8510 || (loop_insn_first_p (biv->insn, insn)
8511 && loop_insn_first_p (insn, giv->insn))))
8512 return 0;
8514 /* If the giv V was derived from another giv, and INSN does
8515 not occur between the giv insn and the biv insn, then we'd
8516 have to adjust the value used here. This is rare, so we don't
8517 bother to make this possible. */
8518 if (giv->derived_from
8519 && ! (giv->always_executed
8520 && loop_insn_first_p (giv->insn, insn)
8521 && loop_insn_first_p (insn, biv->insn)))
8522 return 0;
8523 if (giv->same
8524 && giv->same->derived_from
8525 && ! (giv->same->always_executed
8526 && loop_insn_first_p (giv->same->insn, insn)
8527 && loop_insn_first_p (insn, biv->insn)))
8528 return 0;
8530 return 1;
8533 /* If BL appears in X (part of the pattern of INSN), see if we can
8534 eliminate its use. If so, return 1. If not, return 0.
8536 If BIV does not appear in X, return 1.
8538 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
8539 where extra insns should be added. Depending on how many items have been
8540 moved out of the loop, it will either be before INSN or at the start of
8541 the loop. */
8543 static int
8544 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
8545 rtx x, insn;
8546 struct iv_class *bl;
8547 int eliminate_p;
8548 rtx where;
8550 enum rtx_code code = GET_CODE (x);
8551 rtx reg = bl->biv->dest_reg;
8552 enum machine_mode mode = GET_MODE (reg);
8553 struct induction *v;
8554 rtx arg, tem;
8555 #ifdef HAVE_cc0
8556 rtx new;
8557 #endif
8558 int arg_operand;
8559 const char *fmt;
8560 int i, j;
8562 switch (code)
8564 case REG:
8565 /* If we haven't already been able to do something with this BIV,
8566 we can't eliminate it. */
8567 if (x == reg)
8568 return 0;
8569 return 1;
8571 case SET:
8572 /* If this sets the BIV, it is not a problem. */
8573 if (SET_DEST (x) == reg)
8574 return 1;
8576 /* If this is an insn that defines a giv, it is also ok because
8577 it will go away when the giv is reduced. */
8578 for (v = bl->giv; v; v = v->next_iv)
8579 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8580 return 1;
8582 #ifdef HAVE_cc0
8583 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8585 /* Can replace with any giv that was reduced and
8586 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8587 Require a constant for MULT_VAL, so we know it's nonzero.
8588 ??? We disable this optimization to avoid potential
8589 overflows. */
8591 for (v = bl->giv; v; v = v->next_iv)
8592 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
8593 && v->add_val == const0_rtx
8594 && ! v->ignore && ! v->maybe_dead && v->always_computable
8595 && v->mode == mode
8596 && 0)
8598 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8599 continue;
8601 if (! eliminate_p)
8602 return 1;
8604 /* If the giv has the opposite direction of change,
8605 then reverse the comparison. */
8606 if (INTVAL (v->mult_val) < 0)
8607 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8608 const0_rtx, v->new_reg);
8609 else
8610 new = v->new_reg;
8612 /* We can probably test that giv's reduced reg. */
8613 if (validate_change (insn, &SET_SRC (x), new, 0))
8614 return 1;
8617 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8618 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8619 Require a constant for MULT_VAL, so we know it's nonzero.
8620 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8621 overflow problem. */
8623 for (v = bl->giv; v; v = v->next_iv)
8624 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
8625 && ! v->ignore && ! v->maybe_dead && v->always_computable
8626 && v->mode == mode
8627 && (GET_CODE (v->add_val) == SYMBOL_REF
8628 || GET_CODE (v->add_val) == LABEL_REF
8629 || GET_CODE (v->add_val) == CONST
8630 || (GET_CODE (v->add_val) == REG
8631 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
8633 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8634 continue;
8636 if (! eliminate_p)
8637 return 1;
8639 /* If the giv has the opposite direction of change,
8640 then reverse the comparison. */
8641 if (INTVAL (v->mult_val) < 0)
8642 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8643 v->new_reg);
8644 else
8645 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8646 copy_rtx (v->add_val));
8648 /* Replace biv with the giv's reduced register. */
8649 update_reg_last_use (v->add_val, insn);
8650 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8651 return 1;
8653 /* Insn doesn't support that constant or invariant. Copy it
8654 into a register (it will be a loop invariant). */
8655 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8657 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
8658 where);
8660 /* Substitute the new register for its invariant value in
8661 the compare expression. */
8662 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8663 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8664 return 1;
8667 #endif
8668 break;
8670 case COMPARE:
8671 case EQ: case NE:
8672 case GT: case GE: case GTU: case GEU:
8673 case LT: case LE: case LTU: case LEU:
8674 /* See if either argument is the biv. */
8675 if (XEXP (x, 0) == reg)
8676 arg = XEXP (x, 1), arg_operand = 1;
8677 else if (XEXP (x, 1) == reg)
8678 arg = XEXP (x, 0), arg_operand = 0;
8679 else
8680 break;
8682 if (CONSTANT_P (arg))
8684 /* First try to replace with any giv that has constant positive
8685 mult_val and constant add_val. We might be able to support
8686 negative mult_val, but it seems complex to do it in general. */
8688 for (v = bl->giv; v; v = v->next_iv)
8689 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8690 && (GET_CODE (v->add_val) == SYMBOL_REF
8691 || GET_CODE (v->add_val) == LABEL_REF
8692 || GET_CODE (v->add_val) == CONST
8693 || (GET_CODE (v->add_val) == REG
8694 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
8695 && ! v->ignore && ! v->maybe_dead && v->always_computable
8696 && v->mode == mode)
8698 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8699 continue;
8701 if (! eliminate_p)
8702 return 1;
8704 /* Replace biv with the giv's reduced reg. */
8705 XEXP (x, 1-arg_operand) = v->new_reg;
8707 /* If all constants are actually constant integers and
8708 the derived constant can be directly placed in the COMPARE,
8709 do so. */
8710 if (GET_CODE (arg) == CONST_INT
8711 && GET_CODE (v->mult_val) == CONST_INT
8712 && GET_CODE (v->add_val) == CONST_INT
8713 && validate_change (insn, &XEXP (x, arg_operand),
8714 GEN_INT (INTVAL (arg)
8715 * INTVAL (v->mult_val)
8716 + INTVAL (v->add_val)), 0))
8717 return 1;
8719 /* Otherwise, load it into a register. */
8720 tem = gen_reg_rtx (mode);
8721 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8722 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
8723 return 1;
8725 /* If that failed, put back the change we made above. */
8726 XEXP (x, 1-arg_operand) = reg;
8729 /* Look for giv with positive constant mult_val and nonconst add_val.
8730 Insert insns to calculate new compare value.
8731 ??? Turn this off due to possible overflow. */
8733 for (v = bl->giv; v; v = v->next_iv)
8734 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8735 && ! v->ignore && ! v->maybe_dead && v->always_computable
8736 && v->mode == mode
8737 && 0)
8739 rtx tem;
8741 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8742 continue;
8744 if (! eliminate_p)
8745 return 1;
8747 tem = gen_reg_rtx (mode);
8749 /* Replace biv with giv's reduced register. */
8750 validate_change (insn, &XEXP (x, 1 - arg_operand),
8751 v->new_reg, 1);
8753 /* Compute value to compare against. */
8754 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
8755 /* Use it in this insn. */
8756 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8757 if (apply_change_group ())
8758 return 1;
8761 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8763 if (invariant_p (arg) == 1)
8765 /* Look for giv with constant positive mult_val and nonconst
8766 add_val. Insert insns to compute new compare value.
8767 ??? Turn this off due to possible overflow. */
8769 for (v = bl->giv; v; v = v->next_iv)
8770 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
8771 && ! v->ignore && ! v->maybe_dead && v->always_computable
8772 && v->mode == mode
8773 && 0)
8775 rtx tem;
8777 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8778 continue;
8780 if (! eliminate_p)
8781 return 1;
8783 tem = gen_reg_rtx (mode);
8785 /* Replace biv with giv's reduced register. */
8786 validate_change (insn, &XEXP (x, 1 - arg_operand),
8787 v->new_reg, 1);
8789 /* Compute value to compare against. */
8790 emit_iv_add_mult (arg, v->mult_val, v->add_val,
8791 tem, where);
8792 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8793 if (apply_change_group ())
8794 return 1;
8798 /* This code has problems. Basically, when deciding whether we
8799 will eliminate BL, you can't tell whether a particular giv
8800 of ARG will be reduced. If it isn't going to be reduced,
8801 we can't eliminate BL. We can try forcing it to be reduced,
8802 but that can generate poor code.
8804 The problem is that the benefit of reducing TV, below, should
8805 be increased if BL can actually be eliminated, but this means
8806 we might have to do a topological sort of the order in which
8807 we try to process biv. It doesn't seem worthwhile to do
8808 this sort of thing now. */
8810 #if 0
8811 /* Otherwise the reg compared with had better be a biv. */
8812 if (GET_CODE (arg) != REG
8813 || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT)
8814 return 0;
8816 /* Look for a pair of givs, one for each biv,
8817 with identical coefficients. */
8818 for (v = bl->giv; v; v = v->next_iv)
8820 struct induction *tv;
8822 if (v->ignore || v->maybe_dead || v->mode != mode)
8823 continue;
8825 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
8826 if (! tv->ignore && ! tv->maybe_dead
8827 && rtx_equal_p (tv->mult_val, v->mult_val)
8828 && rtx_equal_p (tv->add_val, v->add_val)
8829 && tv->mode == mode)
8831 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8832 continue;
8834 if (! eliminate_p)
8835 return 1;
8837 /* Replace biv with its giv's reduced reg. */
8838 XEXP (x, 1-arg_operand) = v->new_reg;
8839 /* Replace other operand with the other giv's
8840 reduced reg. */
8841 XEXP (x, arg_operand) = tv->new_reg;
8842 return 1;
8845 #endif
8848 /* If we get here, the biv can't be eliminated. */
8849 return 0;
8851 case MEM:
8852 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8853 biv is used in it, since it will be replaced. */
8854 for (v = bl->giv; v; v = v->next_iv)
8855 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8856 return 1;
8857 break;
8859 default:
8860 break;
8863 /* See if any subexpression fails elimination. */
8864 fmt = GET_RTX_FORMAT (code);
8865 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8867 switch (fmt[i])
8869 case 'e':
8870 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
8871 eliminate_p, where))
8872 return 0;
8873 break;
8875 case 'E':
8876 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8877 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
8878 eliminate_p, where))
8879 return 0;
8880 break;
8884 return 1;
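/* Source-level picture of what eliminating a biv from a comparison
   amounts to, for a hypothetical giv g = 3*i + 10 over biv i:

        i = 0;                          g = 10;
        while (i < 100)                 while (g < 3*100 + 10)
          i += 1;               ==>       g += 3;

   The constant operand of the compare is rewritten as
   arg * mult_val + add_val, exactly what the CONST_INT case above
   computes with GEN_INT.  */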
8887 /* Return nonzero if the last use of REG
8888 is in an insn following INSN in the same basic block. */
8890 static int
8891 last_use_this_basic_block (reg, insn)
8892 rtx reg;
8893 rtx insn;
8895 rtx n;
8896 for (n = insn;
8897 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8898 n = NEXT_INSN (n))
8900 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8901 return 1;
8903 return 0;
8906 /* Called via `note_stores' to record the initial value of a biv. Here we
8907 just record the location of the set and process it later. */
8909 static void
8910 record_initial (dest, set, data)
8911 rtx dest;
8912 rtx set;
8913 void *data ATTRIBUTE_UNUSED;
8915 struct iv_class *bl;
8917 if (GET_CODE (dest) != REG
8918 || REGNO (dest) >= max_reg_before_loop
8919 || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT)
8920 return;
8922 bl = reg_biv_class[REGNO (dest)];
8924 /* If this is the first set found, record it. */
8925 if (bl->init_insn == 0)
8927 bl->init_insn = note_insn;
8928 bl->init_set = set;
8932 /* If any of the registers in X are "old" and currently have a last use earlier
8933 than INSN, update them to have a last use of INSN. Their actual last use
8934 will be the previous insn but it will not have a valid uid_luid so we can't
8935 use it. */
8937 static void
8938 update_reg_last_use (x, insn)
8939 rtx x;
8940 rtx insn;
8942 /* Check for the case where INSN does not have a valid luid. In this case,
8943 there is no need to modify the regno_last_uid, as this can only happen
8944 when code is inserted after the loop_end to set a pseudo's final value,
8945 and hence this insn will never be the last use of x. */
8946 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8947 && INSN_UID (insn) < max_uid_for_loop
8948 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
8949 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8950 else
8952 register int i, j;
8953 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8954 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8956 if (fmt[i] == 'e')
8957 update_reg_last_use (XEXP (x, i), insn);
8958 else if (fmt[i] == 'E')
8959 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8960 update_reg_last_use (XVECEXP (x, i, j), insn);
8965 /* Given a jump insn JUMP, return the condition that will cause it to branch
8966 to its JUMP_LABEL. If the condition cannot be understood, or is an
8967 inequality floating-point comparison which needs to be reversed, 0 will
8968 be returned.
8970 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8971 insn used in locating the condition was found. If a replacement test
8972 of the condition is desired, it should be placed in front of that
8973 insn and we will be sure that the inputs are still valid.
8975 The condition will be returned in a canonical form to simplify testing by
8976 callers. Specifically:
8978 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8979 (2) Both operands will be machine operands; (cc0) will have been replaced.
8980 (3) If an operand is a constant, it will be the second operand.
8981 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8982 for GE, GEU, and LEU. */
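/* For example, canonicalization (4) rewrites (LE x 4) as (LT x 5).
   Any such rewrite must refuse the maximum representable constant,
   since const+1 would wrap.  Minimal sketch of the guard with a
   hypothetical helper (not this file's API):  */
#if 0
#include <limits.h>

static int
le_to_lt (val)
     int *val;
{
  if (*val == INT_MAX)
    return 0;                   /* can't rewrite without overflow */
  ++*val;                       /* (LE x c) => (LT x c+1) */
  return 1;                     /* caller changes the code LE -> LT */
}
#endif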
8984 rtx
8985 get_condition (jump, earliest)
8986 rtx jump;
8987 rtx *earliest;
8989 enum rtx_code code;
8990 rtx prev = jump;
8991 rtx set;
8992 rtx tem;
8993 rtx op0, op1;
8994 int reverse_code = 0;
8995 int did_reverse_condition = 0;
8996 enum machine_mode mode;
8998 /* If this is not a standard conditional jump, we can't parse it. */
8999 if (GET_CODE (jump) != JUMP_INSN
9000 || ! condjump_p (jump) || simplejump_p (jump))
9001 return 0;
9003 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
9004 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
9005 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
9006 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
9008 if (earliest)
9009 *earliest = jump;
9011 /* If this branches to JUMP_LABEL when the condition is false, reverse
9012 the condition. */
9013 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
9014 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
9015 code = reverse_condition (code), did_reverse_condition ^= 1;
9017 /* If we are comparing a register with zero, see if the register is set
9018 in the previous insn to a COMPARE or a comparison operation. Perform
9019 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9020 in cse.c */
9022 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
9024 /* Set non-zero when we find something of interest. */
9025 rtx x = 0;
9027 #ifdef HAVE_cc0
9028 /* If comparison with cc0, import actual comparison from compare
9029 insn. */
9030 if (op0 == cc0_rtx)
9032 if ((prev = prev_nonnote_insn (prev)) == 0
9033 || GET_CODE (prev) != INSN
9034 || (set = single_set (prev)) == 0
9035 || SET_DEST (set) != cc0_rtx)
9036 return 0;
9038 op0 = SET_SRC (set);
9039 op1 = CONST0_RTX (GET_MODE (op0));
9040 if (earliest)
9041 *earliest = prev;
9043 #endif
9045 /* If this is a COMPARE, pick up the two things being compared. */
9046 if (GET_CODE (op0) == COMPARE)
9048 op1 = XEXP (op0, 1);
9049 op0 = XEXP (op0, 0);
9050 continue;
9052 else if (GET_CODE (op0) != REG)
9053 break;
9055 /* Go back to the previous insn. Stop if it is not an INSN. We also
9056 stop if it isn't a single set or if it has a REG_INC note because
9057 we don't want to bother dealing with it. */
9059 if ((prev = prev_nonnote_insn (prev)) == 0
9060 || GET_CODE (prev) != INSN
9061 || FIND_REG_INC_NOTE (prev, 0)
9062 || (set = single_set (prev)) == 0)
9063 break;
9065 /* If this is setting OP0, get what it sets it to if it looks
9066 relevant. */
9067 if (rtx_equal_p (SET_DEST (set), op0))
9069 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
9071 /* ??? We may not combine comparisons done in a CCmode with
9072 comparisons not done in a CCmode. This is to aid targets
9073 like Alpha that have an IEEE compliant EQ instruction, and
9074 a non-IEEE compliant BEQ instruction. The use of CCmode is
9075 actually artificial, simply to prevent the combination, but
9076 should not affect other platforms.
9078 However, we must allow VOIDmode comparisons to match either
9079 CCmode or non-CCmode comparison, because some ports have
9080 modeless comparisons inside branch patterns.
9082 ??? This mode check should perhaps look more like the mode check
9083 in simplify_comparison in combine. */
9085 if ((GET_CODE (SET_SRC (set)) == COMPARE
9086 || (((code == NE
9087 || (code == LT
9088 && GET_MODE_CLASS (inner_mode) == MODE_INT
9089 && (GET_MODE_BITSIZE (inner_mode)
9090 <= HOST_BITS_PER_WIDE_INT)
9091 && (STORE_FLAG_VALUE
9092 & ((HOST_WIDE_INT) 1
9093 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9094 #ifdef FLOAT_STORE_FLAG_VALUE
9095 || (code == LT
9096 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9097 && FLOAT_STORE_FLAG_VALUE < 0)
9098 #endif
9100 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9101 && (((GET_MODE_CLASS (mode) == MODE_CC)
9102 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9103 || mode == VOIDmode || inner_mode == VOIDmode))
9104 x = SET_SRC (set);
9105 else if (((code == EQ
9106 || (code == GE
9107 && (GET_MODE_BITSIZE (inner_mode)
9108 <= HOST_BITS_PER_WIDE_INT)
9109 && GET_MODE_CLASS (inner_mode) == MODE_INT
9110 && (STORE_FLAG_VALUE
9111 & ((HOST_WIDE_INT) 1
9112 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9113 #ifdef FLOAT_STORE_FLAG_VALUE
9114 || (code == GE
9115 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9116 && FLOAT_STORE_FLAG_VALUE < 0)
9117 #endif
9119 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9120 && (((GET_MODE_CLASS (mode) == MODE_CC)
9121 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9122 || mode == VOIDmode || inner_mode == VOIDmode))
9125 /* We might have reversed a LT to get a GE here. But this wasn't
9126 actually the comparison of data, so we don't flag that we
9127 have had to reverse the condition. */
9128 did_reverse_condition ^= 1;
9129 reverse_code = 1;
9130 x = SET_SRC (set);
9132 else
9133 break;
9136 else if (reg_set_p (op0, prev))
9137 /* If this sets OP0, but not directly, we have to give up. */
9138 break;
9140 if (x)
9142 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9143 code = GET_CODE (x);
9144 if (reverse_code)
9146 code = reverse_condition (code);
9147 did_reverse_condition ^= 1;
9148 reverse_code = 0;
9151 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9152 if (earliest)
9153 *earliest = prev;
9157 /* If constant is first, put it last. */
9158 if (CONSTANT_P (op0))
9159 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9161 /* If OP0 is the result of a comparison, we weren't able to find what
9162 was really being compared, so fail. */
9163 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9164 return 0;
9166 /* Canonicalize any ordered comparison with integers involving equality
9167 if we can do computations in the relevant mode and we do not
9168 overflow. */
9170 if (GET_CODE (op1) == CONST_INT
9171 && GET_MODE (op0) != VOIDmode
9172 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9174 HOST_WIDE_INT const_val = INTVAL (op1);
9175 unsigned HOST_WIDE_INT uconst_val = const_val;
9176 unsigned HOST_WIDE_INT max_val
9177 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9179 switch (code)
9181 case LE:
9182 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9183 code = LT, op1 = GEN_INT (const_val + 1);
9184 break;
9186 /* When cross-compiling, const_val might be sign-extended from
9187 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9188 case GE:
9189 if ((HOST_WIDE_INT) (const_val & max_val)
9190 != (((HOST_WIDE_INT) 1
9191 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9192 code = GT, op1 = GEN_INT (const_val - 1);
9193 break;
9195 case LEU:
9196 if (uconst_val < max_val)
9197 code = LTU, op1 = GEN_INT (uconst_val + 1);
9198 break;
9200 case GEU:
9201 if (uconst_val != 0)
9202 code = GTU, op1 = GEN_INT (uconst_val - 1);
9203 break;
9205 default:
9206 break;
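/* Worked instances of the canonicalization above, assuming SImode
   (so max_val == 0xffffffff):

     (le  x (const_int 4))  becomes  (lt  x (const_int 5))
     (geu x (const_int 1))  becomes  (gtu x (const_int 0))

   while LEU against the mode mask itself is left unchanged, because
   there is no room to form const+1 without wrapping around.  */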
9210 /* If this was floating-point and we reversed anything other than an
9211 EQ or NE, return zero. */
9212 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
9213 && did_reverse_condition && code != NE && code != EQ
9214 && ! flag_fast_math
9215 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9216 return 0;
9218 #ifdef HAVE_cc0
9219 /* Never return CC0; return zero instead. */
9220 if (op0 == cc0_rtx)
9221 return 0;
9222 #endif
9224 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9227 /* Similar to the routine above, except that we also put an invariant last
9228 unless both operands are invariants. */
9231 get_condition_for_loop (x)
9232 rtx x;
9234 rtx comparison = get_condition (x, NULL_PTR);
9236 if (comparison == 0
9237 || ! invariant_p (XEXP (comparison, 0))
9238 || invariant_p (XEXP (comparison, 1)))
9239 return comparison;
9241 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9242 XEXP (comparison, 1), XEXP (comparison, 0));
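/* For instance (pseudos invented), if the loop test were
   (lt (reg 64) (reg 65)) with (reg 64) loop-invariant and (reg 65)
   not, the code above returns (gt (reg 65) (reg 64)), so that a
   sole invariant operand always ends up second.  */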
9245 #ifdef HAVE_decrement_and_branch_on_count
9246 /* Instrument a loop for insertion of a bct instruction. We distinguish
9247 between loops with compile-time bounds and those with run-time bounds.
9248 Information from loop_iterations() is used to compute compile-time bounds.
9249 Run-time bounds should use loop preconditioning, but are currently ignored. */
9252 static void
9253 insert_bct (loop_start, loop_end, loop_info)
9254 rtx loop_start, loop_end;
9255 struct loop_info *loop_info;
9257 int i;
9258 unsigned HOST_WIDE_INT n_iterations;
9260 int increment_direction, compare_direction;
9262 /* If the loop condition is <= or >=, the number of iterations
9263 is 1 more than the range of the bounds of the loop. */
9264 int add_iteration = 0;
9266 enum machine_mode loop_var_mode = word_mode;
9268 int loop_num = uid_loop_num [INSN_UID (loop_start)];
9270 /* It's impossible to instrument a completely unrolled loop. */
9271 if (loop_info->unroll_number == loop_info->n_iterations)
9272 return;
9274 /* Make sure that the count register is not in use. */
9275 if (loop_used_count_register [loop_num])
9277 if (loop_dump_stream)
9278 fprintf (loop_dump_stream,
9279 "insert_bct %d: BCT instrumentation failed: count register already in use\n",
9280 loop_num);
9281 return;
9284 /* Make sure that the function has no indirect jumps. */
9285 if (indirect_jump_in_function)
9287 if (loop_dump_stream)
9288 fprintf (loop_dump_stream,
9289 "insert_bct %d: BCT instrumentation failed: indirect jump in function\n",
9290 loop_num);
9291 return;
9294 /* Make sure that the last loop insn is a conditional jump. */
9295 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
9296 || ! condjump_p (PREV_INSN (loop_end))
9297 || simplejump_p (PREV_INSN (loop_end)))
9299 if (loop_dump_stream)
9300 fprintf (loop_dump_stream,
9301 "insert_bct %d: BCT instrumentation failed: invalid jump at loop end\n",
9302 loop_num);
9303 return;
9306 /* Make sure that the loop does not contain a function call
9307 (the count register might be altered by the called function). */
9308 if (loop_info->has_call)
9310 if (loop_dump_stream)
9311 fprintf (loop_dump_stream,
9312 "insert_bct %d: BCT instrumentation failed: function call in loop\n",
9313 loop_num);
9314 return;
9317 /* Make sure that the loop does not jump via a table
9318 (the count register might be used to perform the branch via the table). */
9319 if (loop_info->has_tablejump)
9321 if (loop_dump_stream)
9322 fprintf (loop_dump_stream,
9323 "insert_bct %d: BCT instrumentation failed: computed branch in the loop\n",
9324 loop_num);
9325 return;
9328 /* Account for loop unrolling in instrumented iteration count. */
9329 if (loop_info->unroll_number > 1)
9330 n_iterations = loop_info->n_iterations / loop_info->unroll_number;
9331 else
9332 n_iterations = loop_info->n_iterations;
9334 if (n_iterations != 0 && n_iterations < 3)
9336 /* Allow an enclosing outer loop to benefit if possible. */
9337 if (loop_dump_stream)
9338 fprintf (loop_dump_stream,
9339 "insert_bct %d: Too few iterations to benefit from BCT optimization\n",
9340 loop_num);
9341 return;
9344 /* Try to instrument the loop. */
9346 /* Handle the simpler case, where the bounds are known at compile time. */
9347 if (n_iterations > 0)
9349 /* Mark all enclosing loops as unable to use the count register. */
9350 for (i = loop_num; i != -1; i = loop_outer_loop[i])
9351 loop_used_count_register[i] = 1;
9352 instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));
9353 return;
9356 /* Handle the more complex case, where the bounds are NOT known
9357 at compile time. In this case we generate a run-time calculation
9358 of the number of iterations. */
9360 if (loop_info->iteration_var == 0)
9362 if (loop_dump_stream)
9363 fprintf (loop_dump_stream,
9364 "insert_bct %d: BCT Runtime Instrumentation failed: no loop iteration variable found\n",
9365 loop_num);
9366 return;
9369 if (GET_MODE_CLASS (GET_MODE (loop_info->iteration_var)) != MODE_INT
9370 || GET_MODE_SIZE (GET_MODE (loop_info->iteration_var)) != UNITS_PER_WORD)
9372 if (loop_dump_stream)
9373 fprintf (loop_dump_stream,
9374 "insert_bct %d: BCT Runtime Instrumentation failed: loop variable not integer\n",
9375 loop_num);
9376 return;
9379 /* With run-time bounds, if the comparison is of the form '!=', we give up. */
9380 if (loop_info->comparison_code == NE)
9382 if (loop_dump_stream)
9383 fprintf (loop_dump_stream,
9384 "insert_bct %d: BCT Runtime Instrumentation failed: runtime bounds with != comparison\n",
9385 loop_num);
9386 return;
9388 /* Use common loop preconditioning code instead. */
9389 #if 0
9390 else
9392 /* We rely on the existence of a run-time guard to ensure that the
9393 loop executes at least once. */
9394 rtx sequence;
9395 rtx iterations_num_reg;
9397 unsigned HOST_WIDE_INT increment_value_abs
9398 = INTVAL (increment) * increment_direction;
9400 /* Make sure that the increment is a power of two; otherwise an
9401 (expensive) divide would be needed. */
9402 if (exact_log2 (increment_value_abs) == -1)
9404 if (loop_dump_stream)
9405 fprintf (loop_dump_stream,
9406 "insert_bct: not instrumenting BCT because the increment is not power of 2\n");
9407 return;
9410 /* compute the number of iterations */
9411 start_sequence ();
9413 rtx temp_reg;
9415 /* Again, the number of iterations is calculated by:
9417 ;                   compare-val - initial-val + (increment - 1) + additional-iteration
9418 ;  num_iterations = -----------------------------------------------------------------
9419 ;                                            increment               */
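/* A worked instance of the formula (values invented): for a loop
   equivalent to "for (i = 0; i < 10; i += 2)" we have
   initial-val = 0, compare-val = 10, increment = 2 and
   additional-iteration = 0, giving
   num_iterations = (10 - 0 + (2 - 1) + 0) / 2 = 5; since the
   increment is a power of two, the division is done below by an
   arithmetic shift right of exact_log2 (2) == 1.  */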
9421 /* ??? Do we have to call copy_rtx here before passing rtx to
9422 expand_binop? */
9423 if (compare_direction > 0)
9425 /* <, <=: the loop variable is increasing. */
9426 temp_reg = expand_binop (loop_var_mode, sub_optab,
9427 comparison_value, initial_value,
9428 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9430 else
9432 temp_reg = expand_binop (loop_var_mode, sub_optab,
9433 initial_value, comparison_value,
9434 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9437 if (increment_value_abs - 1 + add_iteration != 0)
9438 temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
9439 GEN_INT (increment_value_abs - 1
9440 + add_iteration),
9441 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9443 if (increment_value_abs != 1)
9444 iterations_num_reg = expand_binop (loop_var_mode, asr_optab,
9445 temp_reg,
9446 GEN_INT (exact_log2 (increment_value_abs)),
9447 NULL_RTX, 0, OPTAB_LIB_WIDEN);
9448 else
9449 iterations_num_reg = temp_reg;
9451 sequence = gen_sequence ();
9452 end_sequence ();
9453 emit_insn_before (sequence, loop_start);
9454 instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
9457 return;
9458 #endif /* Complex case */
9461 /* Instrument loop by inserting a bct in it as follows:
9462 1. A new counter register is created.
9463 2. In the head of the loop the new variable is initialized to the value
9464 passed in the loop_num_iterations parameter.
9465 3. At the end of the loop, comparison of the register with 0 is generated.
9466 The created comparison follows the pattern defined for the
9467 decrement_and_branch_on_count insn, so this insn will be generated.
9468 4. The branch on the old variable is deleted. The compare must remain
9469 because it might be used elsewhere. If the loop variable or condition
9470 register are not used elsewhere, they will be eliminated by flow. */
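/* Schematically (a sketch, not literal RTL dumps), a loop that ends
   with a conditional branch on the old induction variable,

	body
	(set (pc) (if_then_else (cond) (label_ref start) (pc)))

   is rewritten so that the header gains

	(set (reg counter) loop_num_iterations)

   and the loop instead closes with the target's
   decrement_and_branch_on_count pattern, which decrements COUNTER
   and branches to START while it is non-zero.  */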
9472 static void
9473 instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
9474 rtx loop_start, loop_end;
9475 rtx loop_num_iterations;
9477 rtx counter_reg;
9478 rtx start_label;
9479 rtx sequence;
9481 if (HAVE_decrement_and_branch_on_count)
9483 if (loop_dump_stream)
9485 fputs ("instrument_bct: Inserting BCT (", loop_dump_stream);
9486 if (GET_CODE (loop_num_iterations) == CONST_INT)
9487 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC,
9488 INTVAL (loop_num_iterations));
9489 else
9490 fputs ("runtime", loop_dump_stream);
9491 fputs (" iterations)", loop_dump_stream);
9494 /* Discard original jump to continue loop. Original compare result
9495 may still be live, so it cannot be discarded explicitly. */
9496 delete_insn (PREV_INSN (loop_end));
9498 /* Insert the label which will delimit the start of the loop. */
9499 start_label = gen_label_rtx ();
9500 emit_label_after (start_label, loop_start);
9502 /* Insert initialization of the count register into the loop header. */
9503 start_sequence ();
9504 counter_reg = gen_reg_rtx (word_mode);
9505 emit_insn (gen_move_insn (counter_reg, loop_num_iterations));
9506 sequence = gen_sequence ();
9507 end_sequence ();
9508 emit_insn_before (sequence, loop_start);
9510 /* Insert a new comparison on the count register instead of the
9511 old one, generating the needed BCT pattern (which will later be
9512 recognized by the assembly generation phase). */
9513 emit_jump_insn_before (gen_decrement_and_branch_on_count (counter_reg,
9514 start_label),
9515 loop_end);
9516 LABEL_NUSES (start_label)++;
9520 #endif /* HAVE_decrement_and_branch_on_count */
9522 /* Scan the function and determine whether it has indirect (computed) jumps.
9524 This is taken mostly from flow.c; similar code exists elsewhere
9525 in the compiler. It may be useful to put this into rtlanal.c. */
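/* It is expected to be called once per function, roughly as
   "indirect_jump_in_function = indirect_jump_in_function_p (f);"
   with F the function's first insn (call site paraphrased here,
   not quoted).  */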
9526 static int
9527 indirect_jump_in_function_p (start)
9528 rtx start;
9530 rtx insn;
9532 for (insn = start; insn; insn = NEXT_INSN (insn))
9533 if (computed_jump_p (insn))
9534 return 1;
9536 return 0;
9539 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9540 documentation for LOOP_MEMS for the definition of `appropriate'.
9541 This function is called from prescan_loop via for_each_rtx. */
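/* A note on the protocol used here: for_each_rtx treats a callback
   return of 0 as "keep walking", -1 as "skip the sub-expressions of
   this rtx", and any other value as "stop and return it"; that is
   why the CONST_DOUBLE case below answers -1.  */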
9543 static int
9544 insert_loop_mem (mem, data)
9545 rtx *mem;
9546 void *data ATTRIBUTE_UNUSED;
9548 int i;
9549 rtx m = *mem;
9551 if (m == NULL_RTX)
9552 return 0;
9554 switch (GET_CODE (m))
9556 case MEM:
9557 break;
9559 case CONST_DOUBLE:
9560 /* We're not interested in the MEM associated with a
9561 CONST_DOUBLE, so there's no need to traverse into this. */
9562 return -1;
9564 default:
9565 /* This is not a MEM. */
9566 return 0;
9569 /* See if we've already seen this MEM. */
9570 for (i = 0; i < loop_mems_idx; ++i)
9571 if (rtx_equal_p (m, loop_mems[i].mem))
9573 if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
9574 /* The modes of the two memory accesses are different. If
9575 this happens, something tricky is going on, and we just
9576 don't optimize accesses to this MEM. */
9577 loop_mems[i].optimize = 0;
9579 return 0;
9582 /* Resize the array, if necessary. */
9583 if (loop_mems_idx == loop_mems_allocated)
9585 if (loop_mems_allocated != 0)
9586 loop_mems_allocated *= 2;
9587 else
9588 loop_mems_allocated = 32;
9590 loop_mems = (loop_mem_info*)
9591 xrealloc (loop_mems,
9592 loop_mems_allocated * sizeof (loop_mem_info));
9595 /* Actually insert the MEM. */
9596 loop_mems[loop_mems_idx].mem = m;
9597 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9598 because we can't put it in a register. We still store it in the
9599 table, though, so that if we see the same address later, but in a
9600 non-BLK mode, we'll not think we can optimize it at that point. */
9601 loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
9602 loop_mems[loop_mems_idx].reg = NULL_RTX;
9603 ++loop_mems_idx;
9605 return 0;
9608 /* Like load_mems, but also ensures that SET_IN_LOOP,
9609 MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
9610 values after load_mems. */
9612 static void
9613 load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
9614 insn_count)
9615 rtx scan_start;
9616 rtx end;
9617 rtx loop_top;
9618 rtx start;
9619 int *insn_count;
9621 int nregs = max_reg_num ();
9623 load_mems (scan_start, end, loop_top, start);
9625 /* Recalculate set_in_loop and friends since load_mems may have
9626 created new registers. */
9627 if (max_reg_num () > nregs)
9629 int i;
9630 int old_nregs;
9632 old_nregs = nregs;
9633 nregs = max_reg_num ();
9635 if ((unsigned) nregs > set_in_loop->num_elements)
9637 /* Grow all the arrays. */
9638 VARRAY_GROW (set_in_loop, nregs);
9639 VARRAY_GROW (n_times_set, nregs);
9640 VARRAY_GROW (may_not_optimize, nregs);
9641 VARRAY_GROW (reg_single_usage, nregs);
9643 /* Clear the arrays */
9644 bzero ((char *) &set_in_loop->data, nregs * sizeof (int));
9645 bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
9646 bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));
9648 count_loop_regs_set (loop_top ? loop_top : start, end,
9649 may_not_optimize, reg_single_usage,
9650 insn_count, nregs);
9652 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9654 VARRAY_CHAR (may_not_optimize, i) = 1;
9655 VARRAY_INT (set_in_loop, i) = 1;
9658 #ifdef AVOID_CCMODE_COPIES
9659 /* Don't try to move insns which set CC registers if we should not
9660 create CCmode register copies. */
9661 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9662 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9663 VARRAY_CHAR (may_not_optimize, i) = 1;
9664 #endif
9666 /* Set n_times_set for the new registers. */
9667 bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs),
9668 (char *) (&n_times_set->data.i[0] + old_nregs),
9669 (nregs - old_nregs) * sizeof (int));
9673 /* Move MEMs into registers for the duration of the loop. SCAN_START
9674 is the first instruction in the loop (as it is executed). The
9675 other parameters are as for next_insn_in_loop. */
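/* A source-level sketch of the effect: assuming *P has a
   loop-invariant, non-aliased address and the loop is always entered,

	while (c) *p = *p + 1;

   is in effect rewritten as

	reg = *p;  while (c) reg = reg + 1;  *p = reg;

   where the final store back is emitted only for MEMs that are
   written within the loop.  */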
9677 static void
9678 load_mems (scan_start, end, loop_top, start)
9679 rtx scan_start;
9680 rtx end;
9681 rtx loop_top;
9682 rtx start;
9684 int maybe_never = 0;
9685 int i;
9686 rtx p;
9687 rtx label = NULL_RTX;
9688 rtx end_label = NULL_RTX;
9690 if (loop_mems_idx > 0)
9692 /* Nonzero if the next instruction may never be executed. */
9693 int next_maybe_never = 0;
9695 /* Check to see if it's possible that some instructions in the
9696 loop are never executed. */
9697 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
9698 p != NULL_RTX && !maybe_never;
9699 p = next_insn_in_loop (p, scan_start, end, loop_top))
9701 if (GET_CODE (p) == CODE_LABEL)
9702 maybe_never = 1;
9703 else if (GET_CODE (p) == JUMP_INSN
9704 /* If we enter the loop in the middle, and scan
9705 around to the beginning, don't set maybe_never
9706 for that. This must be an unconditional jump,
9707 otherwise the code at the top of the loop might
9708 never be executed. Unconditional jumps are
9709 followed by a barrier and then the loop end. */
9710 && ! (GET_CODE (p) == JUMP_INSN
9711 && JUMP_LABEL (p) == loop_top
9712 && NEXT_INSN (NEXT_INSN (p)) == end
9713 && simplejump_p (p)))
9715 if (!condjump_p (p))
9716 /* Something complicated. */
9717 maybe_never = 1;
9718 else
9719 /* If there are any more instructions in the loop, they
9720 might not be reached. */
9721 next_maybe_never = 1;
9723 else if (next_maybe_never)
9724 maybe_never = 1;
9727 /* Actually move the MEMs. */
9728 for (i = 0; i < loop_mems_idx; ++i)
9730 int written = 0;
9731 rtx reg;
9732 rtx mem = loop_mems[i].mem;
9733 rtx mem_list_entry;
9735 if (MEM_VOLATILE_P (mem)
9736 || invariant_p (XEXP (mem, 0)) != 1)
9737 /* There's no telling whether or not MEM is modified. */
9738 loop_mems[i].optimize = 0;
9740 /* Go through the MEMs written to in the loop to see if this
9741 one is aliased by one of them. */
9742 mem_list_entry = loop_store_mems;
9743 while (mem_list_entry)
9745 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9746 written = 1;
9747 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9748 mem, rtx_varies_p))
9750 /* MEM is indeed aliased by this store. */
9751 loop_mems[i].optimize = 0;
9752 break;
9754 mem_list_entry = XEXP (mem_list_entry, 1);
9757 /* If this MEM is written to, we must be sure that there
9758 are no reads from another MEM that aliases this one. */
9759 if (loop_mems[i].optimize && written)
9761 int j;
9763 for (j = 0; j < loop_mems_idx; ++j)
9765 if (j == i)
9766 continue;
9767 else if (true_dependence (mem,
9768 VOIDmode,
9769 loop_mems[j].mem,
9770 rtx_varies_p))
9772 /* It's not safe to hoist loop_mems[i] out of
9773 the loop because writes to it might not be
9774 seen by reads from loop_mems[j]. */
9775 loop_mems[i].optimize = 0;
9776 break;
9781 if (maybe_never && may_trap_p (mem))
9782 /* We can't access the MEM outside the loop; it might
9783 cause a trap that wouldn't have happened otherwise. */
9784 loop_mems[i].optimize = 0;
9786 if (!loop_mems[i].optimize)
9787 /* We thought we were going to lift this MEM out of the
9788 loop, but later discovered that we could not. */
9789 continue;
9791 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9792 order to keep scan_loop from moving stores to this MEM
9793 out of the loop just because this REG is neither a
9794 user-variable nor used in the loop test. */
9795 reg = gen_reg_rtx (GET_MODE (mem));
9796 REG_USERVAR_P (reg) = 1;
9797 loop_mems[i].reg = reg;
9799 /* Now, replace all references to the MEM with the
9800 corresponding pseudos. */
9801 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
9802 p != NULL_RTX;
9803 p = next_insn_in_loop (p, scan_start, end, loop_top))
9805 rtx_and_int ri;
9806 ri.r = p;
9807 ri.i = i;
9808 for_each_rtx (&p, replace_loop_mem, &ri);
9811 if (!apply_change_group ())
9812 /* We couldn't replace all occurrences of the MEM. */
9813 loop_mems[i].optimize = 0;
9814 else
9816 rtx set;
9818 /* Load the memory immediately before START, which is
9819 the NOTE_LOOP_BEG. */
9820 set = gen_move_insn (reg, mem);
9821 emit_insn_before (set, start);
9823 if (written)
9825 if (label == NULL_RTX)
9827 /* We must compute the former
9828 right-after-the-end label before we insert
9829 the new one. */
9830 end_label = next_label (end);
9831 label = gen_label_rtx ();
9832 emit_label_after (label, end);
9835 /* Store the memory immediately after END, which is
9836 the NOTE_LOOP_END. */
9837 set = gen_move_insn (copy_rtx (mem), reg);
9838 emit_insn_after (set, label);
9841 if (loop_dump_stream)
9843 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9844 REGNO (reg), (written ? "r/w" : "r/o"));
9845 print_rtl (loop_dump_stream, mem);
9846 fputc ('\n', loop_dump_stream);
9852 if (label != NULL_RTX)
9854 /* Now, we need to replace all references to the previous exit
9855 label with the new one. */
9856 rtx_pair rr;
9857 rr.r1 = end_label;
9858 rr.r2 = label;
9860 for (p = start; p != end; p = NEXT_INSN (p))
9862 for_each_rtx (&p, replace_label, &rr);
9864 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9865 field. This is not handled by for_each_rtx because it doesn't
9866 handle unprinted ('0') fields. We need to update JUMP_LABEL
9867 because the immediately following unroll pass will use it.
9868 replace_label would not work anyway, because it only handles
9869 LABEL_REFs. */
9870 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9871 JUMP_LABEL (p) = label;
9876 /* Replace MEM with its associated pseudo register. This function is
9877 called from load_mems via for_each_rtx. DATA is actually an
9878 rtx_and_int * describing the instruction currently being scanned
9879 and the MEM we are currently replacing. */
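/* Note that the validate_change call below passes a nonzero
   "in_group" argument, so each replacement is merely queued;
   load_mems later calls apply_change_group to commit every queued
   change at once, or to back them all out if any one is invalid.  */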
9881 static int
9882 replace_loop_mem (mem, data)
9883 rtx *mem;
9884 void *data;
9886 rtx_and_int *ri;
9887 rtx insn;
9888 int i;
9889 rtx m = *mem;
9891 if (m == NULL_RTX)
9892 return 0;
9894 switch (GET_CODE (m))
9896 case MEM:
9897 break;
9899 case CONST_DOUBLE:
9900 /* We're not interested in the MEM associated with a
9901 CONST_DOUBLE, so there's no need to traverse into one. */
9902 return -1;
9904 default:
9905 /* This is not a MEM. */
9906 return 0;
9909 ri = (rtx_and_int*) data;
9910 i = ri->i;
9912 if (!rtx_equal_p (loop_mems[i].mem, m))
9913 /* This is not the MEM we are currently replacing. */
9914 return 0;
9916 insn = ri->r;
9918 /* Actually replace the MEM. */
9919 validate_change (insn, mem, loop_mems[i].reg, 1);
9921 return 0;
9924 /* Replace occurrences of the old exit label for the loop with the new
9925 one. DATA is an rtx_pair containing the old and new labels,
9926 respectively. */
9928 static int
9929 replace_label (x, data)
9930 rtx *x;
9931 void *data;
9933 rtx l = *x;
9934 rtx old_label = ((rtx_pair*) data)->r1;
9935 rtx new_label = ((rtx_pair*) data)->r2;
9937 if (l == NULL_RTX)
9938 return 0;
9940 if (GET_CODE (l) != LABEL_REF)
9941 return 0;
9943 if (XEXP (l, 0) != old_label)
9944 return 0;
9946 XEXP (l, 0) = new_label;
9947 ++LABEL_NUSES (new_label);
9948 --LABEL_NUSES (old_label);
9950 return 0;