/* Move constant computations out of loops.
   Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
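
/* As a purely illustrative sketch (not literal output of this pass, and
   with invented variable names), the first two transformations would
   rewrite

	for (i = 0; i < n; i++)
	  a[i] = x * y + i * 4;

   roughly as

	t = x * y;			-- invariant hoisted before the loop
	for (i = 0, s = 0; i < n; i++, s += 4)
	  a[i] = t + s;			-- i * 4 strength-reduced to adds

   where t and s stand for compiler-generated temporaries.  */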
#include "insn-config.h"
#include "insn-flags.h"
#include "hard-reg-set.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;
/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;
/* For each loop, gives the containing loop number, -1 if none.  */

static int *loop_outer_loop;
/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;
/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;
/* Holds the number of loop iterations.  It is zero if the number could not
   be calculated.  Must be unsigned since the number of iterations can be as
   high as 2^wordsize-1.  For loops with a wider iterator, this number will
   be zero if the number of loop iterations is too large for an unsigned
   integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;
/* Nonzero if there is a subroutine call in the current loop.
   (unknown_address_altered is also nonzero in this case.)  */

static int loop_has_call;
/* Nonzero if there is a volatile memory reference in the current loop.  */

static int loop_has_volatile;
/* Added loop_continue which is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static short *n_times_set;
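
/* In other words (a summary of the rules above, not a new encoding):

	n_times_set[i] >  0	set that many times; not invariant
	n_times_set[i] == 0	invariant (includes regs already moved)
	n_times_set[i] == -1	candidate, not known equal to a constant
	n_times_set[i] == -2	candidate, known equal to a constant  */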
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static short *n_times_used;
/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;
/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;
/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 20
static rtx loop_store_mems[NUM_STORES];
/* Index of first available slot in above array.  */
static int loop_store_mems_idx;
/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;
/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;
/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;
/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;
/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
FILE *loop_dump_stream;
/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
static void replace_call_address ();
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();
/* Relative gain of eliminating various kinds of operations.  */

static int add_cost;

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */

static int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx (REG, word_mode, 0);

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);
  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;
  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero (uid_luid, max_uid_for_loop * sizeof (int));
  bzero (uid_loop_num, max_uid_for_loop * sizeof (int));
  /* Allocate tables for recording each loop.  We set each entry, so they
     need not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);
  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);
  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;
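
  /* For example (illustrative numbers only): a chain of insns with uids
     7, 40 and 12, with a line-number note between the last two, gets

	uid 7	-> luid 1
	uid 40	-> luid 2
	(note)	-> luid 2	-- shares the luid of the preceding insn
	uid 12	-> luid 3

     so luids increase along the chain even though uids need not.  */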
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();
  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 max_reg_num ());
  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
static void
scan_loop (loop_start, end, nregs)
     rtx loop_start, end;
     int nregs;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (short *) alloca (nregs * sizeof (short));
  n_times_used = (short *) alloca (nregs * sizeof (short));
  may_not_optimize = (char *) alloca (nregs);
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
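
  /* Schematically (an illustrative layout, not literal RTL), such a
     rotated loop looks like

	NOTE_INSN_LOOP_BEG
	(jump L2)		-- the entry jump detected below
     L1:			-- LOOP_TOP
	... loop body ...
     L2:			-- the exit test; SCAN_START
	(conditional jump L1)
	NOTE_INSN_LOOP_END

     whereas a loop entered at the top simply falls through to its body.  */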
  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;
  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
	  && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
	  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }
  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }
  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero (n_times_set, nregs * sizeof (short));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero (reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy (n_times_set, n_times_used, nregs * sizeof (short));
  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
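
  /* As a source-level illustration (invented example) of why MAYBE_NEVER
     matters, in

	while (n-- > 0)
	  {
	    if (d != 0)
	      q = x / d;
	    ...
	  }

     the division is invariant, but hoisting it above the loop would
     execute it even on iterations where the guard is false, possibly
     introducing a trap that the original program never performed.  */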
618 /* At end of a straight-in loop, we are done.
619 At end of a loop entered at the bottom, scan the top. */
632 if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
633 && find_reg_note (p
, REG_LIBCALL
, NULL_RTX
))
635 else if (GET_RTX_CLASS (GET_CODE (p
)) == 'i'
636 && find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;
	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */

	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }
	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (n_times_set[REGNO (SET_DEST (set))] == 1
		       || (tem1
			   = consec_sets_invariant_p (SET_DEST (set),
						      n_times_set[REGNO (SET_DEST (set))],
						      p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));
	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
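
	      /* For instance (hypothetical RTL, register numbers invented),
		 given

		     (set (reg 105) (symbol_ref "foo"))		;; the insn P
		     ...
		     (call_insn ... (mem (reg 105)) ...)	;; the only use

		 hoisting P would keep later passes from folding the address
		 into the call, so the code below substitutes SET_SRC
		 directly into the single use and deletes P instead.  */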
	      if (reg_single_usage && reg_single_usage[regno] != 0
		  && reg_single_usage[regno] != const0_rtx
		  && regno_first_uid[regno] == INSN_UID (p)
		  && (regno_last_uid[regno]
		      == INSN_UID (reg_single_usage[regno]))
		  && n_times_set[REGNO (SET_DEST (set))] == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
#ifdef SMALL_REGISTER_CLASSES
		  && ! (GET_CODE (SET_SRC (set)) == REG
			&& REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
#endif
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   reg_single_usage[regno])
		  && no_labels_between_p (p, reg_single_usage[regno])
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   reg_single_usage[regno]))
		{
		  /* Replace any usage in a REG_EQUAL note.  */
		  REG_NOTES (reg_single_usage[regno])
		    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
				   SET_DEST (set), SET_SRC (set));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  n_times_set[regno] = 0;
		  continue;
		}
	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end)
			   || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[regno_last_uid[regno]]
			     - uid_luid[regno_first_uid[regno]]);
	      m->savings = n_times_used[regno];
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      n_times_set[regno] = move_insn ? -2 : -1;

	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;
	      if (m->consec > 0)
		{
		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);
		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (n_times_set[regno] == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[regno_last_uid[regno]]
				   > INSN_LUID (end))
			       || (uid_luid[regno_first_uid[regno]]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[regno_first_uid[regno]])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[regno_last_uid[regno]]
				 - uid_luid[regno_first_uid[regno]]);
		  m->savings = 1;
		  n_times_set[regno] = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	 so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* This code appears in three places, once in scan_loop, and twice
	 in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		     && NEXT_INSN (NEXT_INSN (p)) == end
		     && simplejump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);
  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);
  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);
  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
		 insn_count, loop_start, end, nregs);
  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];
  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
		     insn_count, loop_start, end);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (regno_first_uid[regno] != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  return 0;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a library
				   routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == regno_last_uid[regno]
	      && !m->done)
	    break;

	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	for (m1 = movables; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }
  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[regno_first_uid[m->regno]];
	    int last = uid_luid[regno_last_uid[m->regno]];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (uid_luid[regno_first_uid[m1->regno]] > last
		       || uid_luid[regno_last_uid[m1->regno]] < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i, j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (x)
	  && rtx_equal_p (m->set_src, y))
	return 1;

  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
	   && CONSTANT_P (x))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (y)
	  && rtx_equal_p (m->set_src, x))
	return 1;

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
	 This is not necessary, since the tablejump references the same label.
	 And if we did record them, flow.c would make worse code.  */
      if (next == 0
	  || ! (GET_CODE (next) == JUMP_INSN
		&& (GET_CODE (PATTERN (next)) == ADDR_VEC
		    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
	{
	  for (insn = insns; insn; insn = NEXT_INSN (insn))
	    if (reg_mentioned_p (XEXP (x, 0), insn))
	      REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
					  REG_NOTES (insn));
	}
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero (reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}
      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;

      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == invariant_p (m->set_src)
		  && (m->dependencies == 0
		      || 1 == invariant_p (m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  register int regno;
	  register rtx p;
	  int savings = m->savings;

	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (moved_once[regno])
	    {
	      savings /= 2;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "halved since already moved ");
	    }
	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || (threshold * savings * m->lifetime) >= insn_count
	      || (m->forces && m->forces->done
		  && n_times_used[m->forces->regno] == 1))
	    {
	      int count;
	      register struct movable *m1;
	      rtx first;
	      /* Now move the insns that set the reg.  */

	      if (m->partial && m->match)
		{
		  rtx newpat, i1;
		  rtx r1, r2;
		  /* Find the end of this chain of matching regs.
		     Thus, we load each reg in the chain from that one reg.
		     And that reg is loaded with 0 directly,
		     since it has ->match == 0.  */
		  for (m1 = m; m1->match; m1 = m1->match);
		  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
					  SET_DEST (PATTERN (m1->insn)));
		  i1 = emit_insn_before (newpat, loop_start);

		  /* Mark the moved, invariant reg as being allowed to
		     share a hard reg with the other matching invariant.  */
		  REG_NOTES (i1) = REG_NOTES (m->insn);
		  r1 = SET_DEST (PATTERN (m->insn));
		  r2 = SET_DEST (PATTERN (m1->insn));
		  regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
					    gen_rtx (EXPR_LIST, VOIDmode, r2,
						     regs_may_share));
		  delete_insn (m->insn);

		  if (new_start == 0)
		    new_start = i1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		}
	      /* If we are to re-generate the item being moved with a
		 new move insn, first delete what we have and then emit
		 the move insn before the loop.  */
	      else if (m->move_insn)
		{
		  rtx i1, temp;

		  for (count = m->consec; count >= 0; count--)
		    {
		      /* If this is the first insn of a library call sequence,
			 skip to the end.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If this is the last insn of a libcall sequence, then
			 delete every insn in the sequence except the last.
			 The last insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  temp = XEXP (temp, 0);
			  while (temp != p)
			    temp = delete_insn (temp);
			}

		      p = delete_insn (p);
		    }
		  start_sequence ();
		  emit_move_insn (m->set_dest, m->set_src);
		  temp = get_insns ();
		  end_sequence ();

		  add_label_notes (m->set_src, temp);

		  i1 = emit_insns_before (temp, loop_start);
		  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
		    REG_NOTES (i1)
		      = gen_rtx (EXPR_LIST,
				 m->is_equiv ? REG_EQUIV : REG_EQUAL,
				 m->set_src, REG_NOTES (i1));

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
	      else
		{
		  for (count = m->consec; count >= 0; count--)
		    {
		      rtx i1, temp;
		      /* If first insn of libcall sequence, skip to end.  */
		      /* Do this at start of loop, since p is guaranteed to
			 be an insn here.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If last insn of libcall sequence, move all
			 insns except the last before the loop.  The last
			 insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
= 0;
1734 for (temp
= XEXP (temp
, 0); temp
!= p
;
1735 temp
= NEXT_INSN (temp
))
1741 if (GET_CODE (temp
) == NOTE
)
1744 body
= PATTERN (temp
);
1746 /* Find the next insn after TEMP,
1747 not counting USE or NOTE insns. */
1748 for (next
= NEXT_INSN (temp
); next
!= p
;
1749 next
= NEXT_INSN (next
))
1750 if (! (GET_CODE (next
) == INSN
1751 && GET_CODE (PATTERN (next
)) == USE
)
1752 && GET_CODE (next
) != NOTE
)
1755 /* If that is the call, this may be the insn
1756 that loads the function address.
1758 Extract the function address from the insn
1759 that loads it into a register.
1760 If this insn was cse'd, we get incorrect code.
1762 So emit a new move insn that copies the
1763 function address into the register that the
1764 call insn will use. flow.c will delete any
1765 redundant stores that we have created. */
1766 if (GET_CODE (next
) == CALL_INSN
1767 && GET_CODE (body
) == SET
1768 && GET_CODE (SET_DEST (body
)) == REG
1769 && (n
= find_reg_note (temp
, REG_EQUAL
,
1772 fn_reg
= SET_SRC (body
);
1773 if (GET_CODE (fn_reg
) != REG
)
1774 fn_reg
= SET_DEST (body
);
1775 fn_address
= XEXP (n
, 0);
1776 fn_address_insn
= temp
;
1778 /* We have the call insn.
1779 If it uses the register we suspect it might,
1780 load it with the correct address directly. */
1781 if (GET_CODE (temp
) == CALL_INSN
1783 && reg_referenced_p (fn_reg
, body
))
1784 emit_insn_after (gen_move_insn (fn_reg
,
1788 if (GET_CODE (temp
) == CALL_INSN
)
1789 i1
= emit_call_insn_before (body
, loop_start
);
1791 i1
= emit_insn_before (body
, loop_start
);
1794 if (temp
== fn_address_insn
)
1795 fn_address_insn
= i1
;
1796 REG_NOTES (i1
) = REG_NOTES (temp
);
		      if (m->savemode != VOIDmode)
			{
			  /* P sets REG to zero; but we should clear only
			     the bits that are not covered by the mode
			     m->savemode.  */
			  rtx reg = m->set_dest;
			  rtx sequence;
			  rtx tem;

			  start_sequence ();
			  tem = expand_binop
			    (GET_MODE (reg), and_optab, reg,
			     GEN_INT ((((HOST_WIDE_INT) 1
					<< GET_MODE_BITSIZE (m->savemode)))
				      - 1),
			     reg, 1, OPTAB_LIB_WIDEN);
			  if (tem == 0)
			    abort ();
			  if (tem != reg)
			    emit_move_insn (reg, tem);
			  sequence = gen_sequence ();
			  end_sequence ();
			  i1 = emit_insn_before (sequence, loop_start);
			}
		      else if (GET_CODE (p) == CALL_INSN)
			i1 = emit_call_insn_before (PATTERN (p), loop_start);
		      else
			i1 = emit_insn_before (PATTERN (p), loop_start);
		      REG_NOTES (i1) = REG_NOTES (p);

		      /* If there is a REG_EQUAL note present whose value is
			 not loop invariant, then delete it, since it may
			 cause problems with later optimization passes.
			 It is possible for cse to create such notes
			 like this as a result of record_jump_cond.  */

		      if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
			  && ! invariant_p (XEXP (temp, 0)))
			remove_note (i1, temp);
		      if (new_start == 0)
			new_start = i1;

		      if (loop_dump_stream)
			fprintf (loop_dump_stream, " moved to %d",
				 INSN_UID (i1));
#if 0
		      /* This isn't needed because REG_NOTES is copied
			 below and is wrong since P might be a PARALLEL.  */
		      if (REG_NOTES (i1) == 0
			  && ! m->partial /* But not if it's a zero-extend clr.  */
			  && ! m->global /* and not if used outside the loop
					    (since it might get set outside).  */
			  && CONSTANT_P (SET_SRC (PATTERN (p))))
			REG_NOTES (i1)
			  = gen_rtx (EXPR_LIST, REG_EQUAL,
				     SET_SRC (PATTERN (p)), REG_NOTES (i1));
#endif
		      /* If library call, now fix the REG_NOTES that contain
			 insn pointers, namely REG_LIBCALL on FIRST
			 and REG_RETVAL on I1.  */
		      if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
			{
			  XEXP (temp, 0) = first;
			  temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
			  XEXP (temp, 0) = i1;
			}
		      delete_insn (p);
		      do p = NEXT_INSN (p);
		      while (p && GET_CODE (p) == NOTE);
		    }
		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}

	      /* Any other movable that loads the same register
		 MUST be moved.  */
	      already_moved[regno] = 1;

	      /* This reg has been moved out of one loop.  */
	      moved_once[regno] = 1;

	      /* The reg set here is now invariant.  */
	      if (! m->partial)
		n_times_set[regno] = 0;

	      m->done = 1;
	      /* Change the length-of-life info for the register
		 to say it lives at least the full length of this loop.
		 This will help guide optimizations in outer loops.  */

	      if (uid_luid[regno_first_uid[regno]] > INSN_LUID (loop_start))
		/* This is the old insn before all the moved insns.
		   We can't use the moved insn because it is out of range
		   in uid_luid.  Only the old insns have luids.  */
		regno_first_uid[regno] = INSN_UID (loop_start);
	      if (uid_luid[regno_last_uid[regno]] < INSN_LUID (end))
		regno_last_uid[regno] = INSN_UID (end);
	      /* Combine with this moved insn any other matching movables.  */

	      if (! m->partial)
		for (m1 = movables; m1; m1 = m1->next)
		  if (m1->match == m)
		    {
		      rtx temp;

		      /* Schedule the reg loaded by M1
			 for replacement so that it shares the reg of M.
			 If the modes differ (only possible in restricted
			 circumstances), make a SUBREG.  */
		      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
			reg_map[m1->regno] = m->set_dest;
		      else
			reg_map[m1->regno]
			  = gen_lowpart_common (GET_MODE (m1->set_dest),
						m->set_dest);

		      /* Get rid of the matching insn
			 and prevent further processing of it.  */
		      m1->done = 1;

		      /* if library call, delete all insn except last, which
			 is deleted below */
		      if (temp = find_reg_note (m1->insn, REG_RETVAL,
						NULL_RTX))
			{
			  for (temp = XEXP (temp, 0); temp != m1->insn;
			       temp = NEXT_INSN (temp))
			    delete_insn (temp);
			}
		      delete_insn (m1->insn);

		      /* Any other movable that loads the same register
			 MUST be moved.  */
		      already_moved[m1->regno] = 1;

		      /* The reg merged here is now invariant,
			 if the reg it matches is invariant.  */
		      if (! m->partial)
			n_times_set[m1->regno] = 0;
		    }
	    }
	  else if (loop_dump_stream)
	    fprintf (loop_dump_stream, "not desirable");
	}
      else if (loop_dump_stream && !m->match)
	fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\n");
    }
  if (new_start == 0)
    new_start = loop_start;
  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, nregs, 0);
	replace_regs (REG_NOTES (p), reg_map, nregs, 0);
	INSN_CODE (p) = -1;
      }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (x, reg, addr)
     rtx x, reg, addr;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
      return;

    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
	 something is wrong.  */
      if (XEXP (x, 0) != reg)
	abort ();
      XEXP (x, 0) = addr;
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	replace_call_address (XEXP (x, i), reg, addr);
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    replace_call_address (XVECEXP (x, i, j), reg, addr);
	}
    }
}
/* Return the number of memory refs to addresses that vary
   in the rest of the loop.  */

static int
count_nonfixed_reads (x)
     rtx x;
{
  register enum rtx_code code;
  register int i;
  register char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return ((invariant_p (XEXP (x, 0)) != 1)
	      + count_nonfixed_reads (XEXP (x, 0)));
    }

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	value += count_nonfixed_reads (XEXP (x, i));
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    value += count_nonfixed_reads (XVECEXP (x, i, j));
	}
    }
  return value;
}
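
/* For illustration (hypothetical case, not from the original source):
   in a loop over `i', a read such as `a[i]' is a nonfixed read, since
   the address `a + i' is not invariant within the loop, while `*p'
   with `p' unchanged inside the loop is a fixed read and is not
   counted.  In RTL terms, (mem (plus (reg a) (reg i))) counts when
   `i' is set in the loop; (mem (reg p)) does not when `p' is
   invariant.  */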
/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
   Replace it with an instruction to load just the low bytes
   if the machine supports such an instruction,
   and insert above LOOP_START an instruction to clear the register.  */

static void
constant_high_bytes (p, loop_start)
     rtx p, loop_start;
{
  register rtx new;
  register int insn_code_number;

  /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
     to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...).  */

  new = gen_rtx (SET, VOIDmode,
		 gen_rtx (STRICT_LOW_PART, VOIDmode,
			  gen_rtx (SUBREG,
				   GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
				   SET_DEST (PATTERN (p)), 0)),
		 XEXP (SET_SRC (PATTERN (p)), 0));
  insn_code_number = recog (new, p);

  if (insn_code_number)
    {
      /* Clear destination register before the loop.  */
      emit_insn_before (gen_rtx (SET, VOIDmode,
				 SET_DEST (PATTERN (p)), const0_rtx),
			loop_start);

      /* Inside the loop, just load the low part.  */
    }
}
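
/* For illustration (hypothetical SImode/QImode example, not from the
   original source): given inside the loop

       (set (reg:SI 65) (zero_extend:SI (mem:QI ...)))

   the recognized replacement would be

       (set (strict_low_part (subreg:QI (reg:SI 65) 0)) (mem:QI ...))

   with a single (set (reg:SI 65) (const_int 0)) emitted before
   LOOP_START so the high bytes are cleared only once.  */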
/* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
   and `loop_has_volatile'.
   Also, fill in the array `loop_store_mems'.  */

static void
prescan_loop (start, end)
     rtx start, end;
{
  register int level = 1;
  register rtx insn;

  unknown_address_altered = 0;
  loop_has_call = 0;
  loop_has_volatile = 0;
  loop_store_mems_idx = 0;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	{
	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	    {
	      ++level;
	      /* Count number of loops contained in this one.  */
	      ++loops_enclosed;
	    }
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
	    --level;
	  else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
	    {
	      if (level == 1)
		loop_continue = insn;
	    }
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  unknown_address_altered = 1;
	  loop_has_call = 1;
	}
      else
	{
	  if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	    {
	      if (volatile_refs_p (PATTERN (insn)))
		loop_has_volatile = 1;

	      note_stores (PATTERN (insn), note_addr_stored);
	    }
	}
    }
}
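
/* For example (hypothetical): a loop body containing one CALL_INSN
   and one store through a pointer leaves unknown_address_altered == 1
   and loop_has_call == 1, so the invariance tests below will refuse
   to treat any MEM in that loop as invariant.  */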
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

static void
find_and_verify_loops (f)
     rtx f;
{
  rtx insn;
  rtx label;
  int current_loop = -1;
  int next_loop = -1;
  int loop;

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop_num[0] = -1;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_BEG:
	    loop_number_loop_starts[++next_loop] = insn;
	    loop_number_loop_ends[next_loop] = 0;
	    loop_outer_loop[next_loop] = current_loop;
	    loop_invalid[next_loop] = 0;
	    loop_number_exit_labels[next_loop] = 0;
	    current_loop = next_loop;
	    break;

	  case NOTE_INSN_SETJMP:
	    /* In this case, we must invalidate our current loop and any
	       enclosing loops.  */
	    for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
	      {
		loop_invalid[loop] = 1;
		if (loop_dump_stream)
		  fprintf (loop_dump_stream,
			   "\nLoop at %d ignored due to setjmp.\n",
			   INSN_UID (loop_number_loop_starts[loop]));
	      }
	    break;

	  case NOTE_INSN_LOOP_END:
	    if (current_loop == -1)
	      abort ();

	    loop_number_loop_ends[current_loop] = insn;
	    current_loop = loop_outer_loop[current_loop];
	    break;
	  }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
	 enclosing loop, but this doesn't matter.  */
      uid_loop_num[INSN_UID (insn)] = current_loop;
    }
  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */

  for (label = forced_labels; label; label = XEXP (label, 1))
    {
      int loop_num;

      for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
	   loop_num != -1;
	   loop_num = loop_outer_loop[loop_num])
	loop_invalid[loop_num] = 1;
    }
  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
      {
	int this_loop_num = uid_loop_num[INSN_UID (insn)];

	if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
	  {
	    rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
	    if (note)
	      {
		int loop_num;

		for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
		     loop_num != -1;
		     loop_num = loop_outer_loop[loop_num])
		  loop_invalid[loop_num] = 1;
	      }
	  }

	if (GET_CODE (insn) != JUMP_INSN)
	  continue;

	mark_loop_jump (PATTERN (insn), this_loop_num);
	/* See if this is an unconditional branch outside the loop.  */
	if (this_loop_num != -1
	    && (GET_CODE (PATTERN (insn)) == RETURN
		|| (simplejump_p (insn)
		    && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
			!= this_loop_num)))
	    && get_max_uid () < max_uid_for_loop)
	  {
	    rtx p;
	    rtx our_next = next_real_insn (insn);

	    /* Go backwards until we reach the start of the loop, a label,
	       or a JUMP_INSN.  */
	    for (p = PREV_INSN (insn);
		 GET_CODE (p) != CODE_LABEL
		 && ! (GET_CODE (p) == NOTE
		       && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
		 && GET_CODE (p) != JUMP_INSN;
		 p = PREV_INSN (p))
	      ;
	    /* If we stopped on a JUMP_INSN to the next insn after INSN,
	       we have a block of code to try to move.

	       We look backward and then forward from the target of INSN
	       to find a BARRIER at the same loop depth as the target.
	       If we find such a BARRIER, we make a new label for the start
	       of the block, invert the jump in P and point it to that label,
	       and move the block of code to the spot we found.  */

	    if (GET_CODE (p) == JUMP_INSN
		&& JUMP_LABEL (p) != 0
		/* Just ignore jumps to labels that were never emitted.
		   These always indicate compilation errors.  */
		&& INSN_UID (JUMP_LABEL (p)) != 0
		&& condjump_p (p)
		&& ! simplejump_p (p)
		&& next_real_insn (JUMP_LABEL (p)) == our_next)
	      {
		rtx target
		  = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
		int target_loop_num = uid_loop_num[INSN_UID (target)];
		rtx loc;

		for (loc = target; loc; loc = PREV_INSN (loc))
		  if (GET_CODE (loc) == BARRIER
		      && uid_loop_num[INSN_UID (loc)] == target_loop_num)
		    break;

		if (loc == 0)
		  for (loc = target; loc; loc = NEXT_INSN (loc))
		    if (GET_CODE (loc) == BARRIER
			&& uid_loop_num[INSN_UID (loc)] == target_loop_num)
		      break;

		if (loc)
		  {
		    rtx cond_label = JUMP_LABEL (p);
		    rtx new_label = get_label_after (p);

		    /* Ensure our label doesn't go away.  */
		    LABEL_NUSES (cond_label)++;

		    /* Verify that uid_loop_num is large enough and that
		       we can invert P.  */
		    if (invert_jump (p, new_label))
		      {
			rtx q, r;

			/* Include the BARRIER after INSN and copy the
			   block after LOC.  */
			new_label = squeeze_notes (new_label, NEXT_INSN (insn));
			reorder_insns (new_label, NEXT_INSN (insn), loc);

			/* All those insns are now in TARGET_LOOP_NUM.  */
			for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
			     q = NEXT_INSN (q))
			  uid_loop_num[INSN_UID (q)] = target_loop_num;

			/* The label jumped to by INSN is no longer a loop exit.
			   Unless INSN does not have a label (e.g., it is a
			   RETURN insn), search loop_number_exit_labels to find
			   its label_ref, and remove it.  Also turn off
			   LABEL_OUTSIDE_LOOP_P bit.  */
			if (JUMP_LABEL (insn))
			  {
			    for (q = 0,
				 r = loop_number_exit_labels[this_loop_num];
				 r; q = r, r = LABEL_NEXTREF (r))
			      if (XEXP (r, 0) == JUMP_LABEL (insn))
				{
				  LABEL_OUTSIDE_LOOP_P (r) = 0;
				  if (q)
				    LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
				  else
				    loop_number_exit_labels[this_loop_num]
				      = LABEL_NEXTREF (r);
				  break;
				}

			    /* If we didn't find it, then something is wrong.  */
			    if (! r)
			      abort ();
			  }

			/* P is now a jump outside the loop, so it must be put
			   in loop_number_exit_labels, and marked as such.
			   The easiest way to do this is to just call
			   mark_loop_jump again for P.  */
			mark_loop_jump (PATTERN (p), this_loop_num);

			/* If INSN now jumps to the insn after it,
			   delete INSN.  */
			if (JUMP_LABEL (insn) != 0
			    && (next_real_insn (JUMP_LABEL (insn))
				== next_real_insn (insn)))
			  delete_insn (insn);
		      }

		    /* Continue the loop after where the conditional
		       branch used to jump, since the only branch insn
		       in the block (if it still remains) is an inter-loop
		       branch and hence needs no processing.  */
		    insn = NEXT_INSN (cond_label);

		    if (--LABEL_NUSES (cond_label) == 0)
		      delete_insn (cond_label);

		    /* This loop will be continued with NEXT_INSN (insn).  */
		    insn = PREV_INSN (insn);
		  }
	      }
	  }
      }
}
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

static void
mark_loop_jump (x, loop_num)
     rtx x;
     int loop_num;
{
  int dest_loop;
  int outer_loop;
  int i;

  switch (GET_CODE (x))
    {
    case CONST:
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case PLUS:
    case MINUS:
    case MULT:
      mark_loop_jump (XEXP (x, 0), loop_num);
      mark_loop_jump (XEXP (x, 1), loop_num);
      return;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mark_loop_jump (XEXP (x, 0), loop_num);
      return;

    case LABEL_REF:
      dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
	 is used by final_[bg]iv_value and the loop unrolling code.  Also
	 mark this LABEL_REF so we know that this branch should predict
	 false.  */

      if (dest_loop != loop_num && loop_num != -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;
	}

      /* If this is inside a loop, but not in the current loop or one enclosed
	 by it, it invalidates at least one loop.  */

      if (dest_loop == -1)
	return;

      /* We must invalidate every nested loop containing the target of this
	 label, except those that also contain the jump insn.  */

      for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
	{
	  /* Stop when we reach a loop that also contains the jump insn.  */
	  for (outer_loop = loop_num; outer_loop != -1;
	       outer_loop = loop_outer_loop[outer_loop])
	    if (dest_loop == outer_loop)
	      return;

	  /* If we get here, we know we need to invalidate a loop.  */
	  if (loop_dump_stream && ! loop_invalid[dest_loop])
	    fprintf (loop_dump_stream,
		     "\nLoop at %d ignored due to multiple entry points.\n",
		     INSN_UID (loop_number_loop_starts[dest_loop]));

	  loop_invalid[dest_loop] = 1;
	}
      return;

    case SET:
      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
	mark_loop_jump (SET_SRC (x), loop_num);
      return;

    case IF_THEN_ELSE:
      mark_loop_jump (XEXP (x, 1), loop_num);
      mark_loop_jump (XEXP (x, 2), loop_num);
      return;

    case PARALLEL:
    case ADDR_VEC:
      for (i = 0; i < XVECLEN (x, 0); i++)
	mark_loop_jump (XVECEXP (x, 0, i), loop_num);
      return;

    case ADDR_DIFF_VEC:
      for (i = 0; i < XVECLEN (x, 1); i++)
	mark_loop_jump (XVECEXP (x, 1, i), loop_num);
      return;

    default:
      /* Treat anything else (such as a symbol_ref)
	 as a branch out of this loop, but not into any loop.  */

      if (loop_num != -1)
	{
	  LABEL_OUTSIDE_LOOP_P (x) = 1;
	  LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
	  loop_number_exit_labels[loop_num] = x;
	}
      return;
    }
}
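
/* For example (hypothetical): a jump from inside loop 2 to a label
   inside a sibling loop 3 marks loop 3 invalid, since loop 3 would
   then have more than one entry point.  A jump to a label in loop 1,
   which encloses loop 2, invalidates nothing, because the walk up
   loop_outer_loop reaches loop 1 from both the source and the
   target.  */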
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (insn, end)
     rtx insn;
     int end;
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (GET_CODE (insn) == CODE_LABEL)
	return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (x)
     rtx x;
{
  register int i;

  if (x == 0 || GET_CODE (x) != MEM)
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    unknown_address_altered = 1;

  if (unknown_address_altered)
    return;

  for (i = 0; i < loop_store_mems_idx; i++)
    if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
	&& MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
      {
	/* We are storing at the same address as previously noted.  Save the
	   wider reference.  */
	if (GET_MODE_SIZE (GET_MODE (x))
	    > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
	  loop_store_mems[i] = x;
	break;
      }

  if (i == NUM_STORES)
    unknown_address_altered = 1;
  else if (i == loop_store_mems_idx)
    loop_store_mems[loop_store_mems_idx++] = x;
}
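
/* For example (hypothetical): if the loop stores (mem:QI (reg 70))
   and later (mem:SI (reg 70)), only the wider SImode MEM is kept in
   loop_store_mems for that address, so the dependence test in
   invariant_p sees the full range of bytes that may be written.  */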
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   If `unknown_address_altered' is nonzero, no memory ref is invariant.
   Otherwise, a memory ref is invariant if it does not conflict with
   anything stored in `loop_store_mems'.  */

int
invariant_p (x)
     register rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;
  int conditional = 0;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
      /* A LABEL_REF is normally invariant, however, if we are unrolling
	 loops, and this label is inside the loop, then it isn't invariant.
	 This is because each unrolled copy of the loop body will have
	 a copy of this label.  If this was invariant, then an insn loading
	 the address of this label into a register might get moved outside
	 the loop, and then each loop body would end up using the same label.

	 We don't know the loop bounds here though, so just fail for all
	 labels.  */
      if (flag_unroll_loops)
	return 0;
      else
	return 1;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
	 since the reg might be set by initialization within the loop.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == arg_pointer_rtx)
	return 1;
      if (loop_has_call
	  && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
	return 0;
      if (n_times_set[REGNO (x)] < 0)
	return 2;
      return n_times_set[REGNO (x)] == 0;

    case MEM:
      /* Read-only items (such as constants in a constant pool) are
	 invariant if their address is.  */
      if (RTX_UNCHANGING_P (x))
	break;

      /* If we filled the table (or had a subroutine call), any location
	 in memory could have been clobbered.  */
      if (unknown_address_altered
	  /* Don't mess with volatile memory references.  */
	  || MEM_VOLATILE_P (x))
	return 0;

      /* See if there is any dependence between a store and this load.  */
      for (i = loop_store_mems_idx - 1; i >= 0; i--)
	if (true_dependence (loop_store_mems[i], x))
	  return 0;

      /* It's not invalidated by a store in memory
	 but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return 0;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  int tem = invariant_p (XEXP (x, i));
	  if (tem == 0)
	    return 0;
	  if (tem == 2)
	    conditional = 1;
	}
      else if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      int tem = invariant_p (XVECEXP (x, i, j));
	      if (tem == 0)
		return 0;
	      if (tem == 2)
		conditional = 1;
	    }
	}
    }

  return 1 + conditional;
}
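
/* For example (hypothetical): for a pseudo set before the loop and
   never inside it, n_times_set[REGNO (x)] == 0 and the result is 1;
   for a candidate whose sets were provisionally discounted
   (n_times_set < 0) the result is 2, i.e. only conditionally
   invariant; and (plus (reg) (const_int 4)) is exactly as invariant
   as its reg operand, by the recursive scan above.  */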
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (reg, n_sets, insn)
     int n_sets;
     rtx reg, insn;
{
  register rtx p = insn;
  register int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = n_times_set[regno];
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  n_times_set[regno] = 0;

  while (count > 0)
    {
      register enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == regno)
	{
	  this = invariant_p (SET_SRC (set));
	  if (this != 0)
	    value |= this;
	  else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
	    {
	      /* If this is a libcall, then any invariant REG_EQUAL note is OK.
		 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
		 notes are OK.  */
	      this = (CONSTANT_P (XEXP (temp, 0))
		      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
			  && invariant_p (XEXP (temp, 0))));
	      if (this != 0)
		value |= this;
	    }
	}
      if (this != 0)
	count--;
      else if (code != NOTE)
	{
	  n_times_set[regno] = old;
	  return 0;
	}
    }

  n_times_set[regno] = old;
  /* If invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
#if 0
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

static int
all_sets_invariant_p (reg, insn, table)
     rtx reg, insn;
     short *table;
{
  register rtx p = insn;
  register int regno = REGNO (reg);

  while (1)
    {
      register enum rtx_code code;
      p = NEXT_INSN (p);
      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
	return 1;
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
	  && GET_CODE (SET_DEST (PATTERN (p))) == REG
	  && REGNO (SET_DEST (PATTERN (p))) == regno)
	{
	  if (! invariant_p (SET_SRC (PATTERN (p)), table))
	    return 0;
	}
    }
}
#endif /* 0 */
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

static void
find_single_use_in_loop (insn, x, usage)
     rtx insn;
     rtx x;
     rtx *usage;
{
  enum rtx_code code = GET_CODE (x);
  char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    usage[REGNO (x)]
      = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
	? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
	 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable so we don't care how USAGE is set
	 for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
	find_single_use_in_loop (insn, SET_DEST (x), usage);
      find_single_use_in_loop (insn, SET_SRC (x), usage);
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e' && XEXP (x, i) != 0)
	  find_single_use_in_loop (insn, XEXP (x, i), usage);
	else if (fmt[i] == 'E')
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
      }
}
/* Increment N_TIMES_SET at the index of each register
   that is modified by an insn between FROM and TO.
   If the value of an element of N_TIMES_SET becomes 127 or more,
   stop incrementing it, to avoid overflow.

   Store in SINGLE_USAGE[I] the single insn in which register I is
   used, if it is only used once.  Otherwise, it is set to 0 (for no
   uses) or const0_rtx for more than one use.  This parameter may be zero,
   in which case this processing is not done.

   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */

/* last_set[n] is nonzero iff reg n has been set in the current basic block.
   In that case, it is the insn that last set reg n.  */

static void
count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
     register rtx from, to;
     char *may_not_move;
     rtx *single_usage;
     int *count_ptr;
     int nregs;
{
  register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
  register rtx insn;
  register rtx dest;
  register int count = 0;

  bzero (last_set, nregs * sizeof (rtx));
  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
	{
	  ++count;

	  /* If requested, record registers that have exactly one use.  */
	  if (single_usage)
	    {
	      find_single_use_in_loop (insn, PATTERN (insn), single_usage);

	      /* Include uses in REG_EQUAL notes.  */
	      if (REG_NOTES (insn))
		find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
	    }

	  if (GET_CODE (PATTERN (insn)) == CLOBBER
	      && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
	    /* Don't move a reg that has an explicit clobber.
	       We might do so sometimes, but it's not worth the pain.  */
	    may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    {
	      dest = SET_DEST (PATTERN (insn));
	      while (GET_CODE (dest) == SUBREG
		     || GET_CODE (dest) == ZERO_EXTRACT
		     || GET_CODE (dest) == SIGN_EXTRACT
		     || GET_CODE (dest) == STRICT_LOW_PART)
		dest = XEXP (dest, 0);
	      if (GET_CODE (dest) == REG)
		{
		  register int regno = REGNO (dest);
		  /* If this is the first setting of this reg
		     in current basic block, and it was set before,
		     it must be set in two basic blocks, so it cannot
		     be moved out of the loop.  */
		  if (n_times_set[regno] > 0 && last_set[regno] == 0)
		    may_not_move[regno] = 1;
		  /* If this is not first setting in current basic block,
		     see if reg was used in between previous one and this.
		     If so, neither one can be moved.  */
		  if (last_set[regno] != 0
		      && reg_used_between_p (dest, last_set[regno], insn))
		    may_not_move[regno] = 1;
		  if (n_times_set[regno] < 127)
		    ++n_times_set[regno];
		  last_set[regno] = insn;
		}
	    }
	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	    {
	      register int i;
	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
		{
		  register rtx x = XVECEXP (PATTERN (insn), 0, i);
		  if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
		    /* Don't move a reg that has an explicit clobber.
		       It's not worth the pain to try to do it correctly.  */
		    may_not_move[REGNO (XEXP (x, 0))] = 1;

		  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
		    {
		      dest = SET_DEST (x);
		      while (GET_CODE (dest) == SUBREG
			     || GET_CODE (dest) == ZERO_EXTRACT
			     || GET_CODE (dest) == SIGN_EXTRACT
			     || GET_CODE (dest) == STRICT_LOW_PART)
			dest = XEXP (dest, 0);
		      if (GET_CODE (dest) == REG)
			{
			  register int regno = REGNO (dest);
			  if (n_times_set[regno] > 0 && last_set[regno] == 0)
			    may_not_move[regno] = 1;
			  if (last_set[regno] != 0
			      && reg_used_between_p (dest, last_set[regno], insn))
			    may_not_move[regno] = 1;
			  if (n_times_set[regno] < 127)
			    ++n_times_set[regno];
			  last_set[regno] = insn;
			}
		    }
		}
	    }
	}

      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
	bzero (last_set, nregs * sizeof (rtx));
    }
  *count_ptr = count;
}
/* Given a loop that is bounded by LOOP_START and LOOP_END
   and that is entered at SCAN_START,
   return 1 if the register set in SET contained in insn INSN is used by
   any insn that precedes INSN in cyclic order starting
   from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */

static int
loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
     rtx set, insn, loop_start, scan_start, loop_end;
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP_END, wrap around to LOOP_START.  */
  for (p = scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && reg_overlap_mentioned_p (reg, PATTERN (p)))
	return 1;

      if (p == loop_end)
	p = loop_start;
    }

  return 0;
}
3045 /* A "basic induction variable" or biv is a pseudo reg that is set
3046 (within this loop) only by incrementing or decrementing it. */
3047 /* A "general induction variable" or giv is a pseudo reg whose
3048 value is a linear function of a biv. */
3050 /* Bivs are recognized by `basic_induction_var';
3051 Givs by `general_induct_var'. */
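
/* For illustration (hypothetical source loop, not from the original
   code): in

       for (i = 0; i < n; i++)
	 a[i] = 0;

   the counter `i' is a biv, since the only set of `i' inside the loop
   is the increment.  With 4-byte elements, the address computation
   `a + 4*i' is a giv: a linear function mult_val * biv + add_val,
   here with mult_val == 4 and add_val == `a'.  */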
/* Indexed by register number, indicates whether or not register is an
   induction variable, and if so what type.  */

enum iv_mode *reg_iv_type;

/* Indexed by register number, contains pointer to `struct induction'
   if register is an induction variable.  This holds general info for
   all induction variables.  */

struct induction **reg_iv_info;

/* Indexed by register number, contains pointer to `struct iv_class'
   if register is a basic induction variable.  This holds info describing
   the class (a related group) of induction variables that the biv belongs
   to.  */

struct iv_class **reg_biv_class;

/* The head of a list which links together (via the next field)
   every iv class for the current loop.  */

struct iv_class *loop_iv_list;

/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;
/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? There is one more optimization you might be interested in doing: to
   allocate pseudo registers for frequently-accessed memory locations.
   If the same memory location is referenced each time around, it might
   be possible to copy it into a register before and out after.
   This is especially useful when the memory location is a variable which
   is in a stack slot because somewhere its address is taken.  If the
   loop doesn't contain a function call and the variable isn't volatile,
   it is safe to keep the value in a register for the duration of the
   loop.  One tricky thing is that the copying of the value back from the
   register has to be done on all exits from the loop.  You need to check that
   all the exits from the loop go to the same place.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */
/* Perform strength reduction and induction variable elimination.  */

/* Pseudo registers created during this function will be beyond the last
   valid index in several tables including n_times_set and regno_last_uid.
   This does not cause a problem here, because the added registers cannot be
   givs outside of their loop, and hence will never be reconsidered.
   But scan_loop must check regnos to make sure they are in bounds.  */

static void
strength_reduce (scan_start, end, loop_top, insn_count,
		 loop_start, loop_end)
     rtx scan_start, end, loop_top;
     int insn_count;
     rtx loop_start, loop_end;
{
  /* This is 1 if current insn is not executed at least once for every loop
     iteration.  */
  int not_every_iteration = 0;
  /* This is 1 if current insn may be executed more than once for every
     loop iteration.  */
  int maybe_multiple = 0;
  /* Temporary list pointers for traversing loop_iv_list.  */
  struct iv_class *bl, **backbl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
  /* Map of pseudo-register replacements.  */
  rtx *reg_map;
  rtx end_insert_before;

  reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
					 * sizeof (enum iv_mode *));
  bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
  reg_iv_info = (struct induction **)
    alloca (max_reg_before_loop * sizeof (struct induction *));
  bzero ((char *) reg_iv_info, (max_reg_before_loop
				* sizeof (struct induction *)));
  reg_biv_class = (struct iv_class **)
    alloca (max_reg_before_loop * sizeof (struct iv_class *));
  bzero ((char *) reg_biv_class, (max_reg_before_loop
				  * sizeof (struct iv_class *)));

  addr_placeholder = gen_reg_rtx (Pmode);

  /* Save insn immediately after the loop_end.  Insns inserted after loop_end
     must be put before this insn, so that they will appear in the right
     order (i.e. loop order).

     If loop_end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set end_insert_before to the
     NOTE_INSN_DELETED.  */
  if (NEXT_INSN (loop_end) != 0)
    end_insert_before = NEXT_INSN (loop_end);
  else
    end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
  /* Scan through loop to find all possible bivs.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG)
	{
	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < max_reg_before_loop
	      && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
	      && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
	    {
	      if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
				       dest_reg, p, &inc_val, &mult_val))
		{
		  /* It is a possible basic induction variable.
		     Create and initialize an induction structure for it.  */

		  struct induction *v
		    = (struct induction *) alloca (sizeof (struct induction));

		  record_biv (v, p, dest_reg, inc_val, mult_val,
			      not_every_iteration, maybe_multiple);
		  reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
		}
	      else if (REGNO (dest_reg) < max_reg_before_loop)
		reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
	    }
	}
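
      /* For illustration (hypothetical): an insn such as

	     (set (reg:SI 64) (plus:SI (reg:SI 64) (const_int 1)))

	 is accepted here as a biv update with inc_val == const_int 1,
	 while a set of the same pseudo from anything that is not such
	 an increment forces it to NOT_BASIC_INDUCT.  */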
      /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
	 returns, exits the loop, or is a forward jump.  */

      if (GET_CODE (p) == CODE_LABEL)
	{
	  insn = p;
	  while (1)
	    {
	      insn = NEXT_INSN (insn);
	      if (insn == scan_start)
		break;
	      if (insn == end)
		{
		  if (loop_top != 0)
		    insn = loop_top;
		  else
		    break;
		  if (insn == scan_start)
		    break;
		}

	      if (GET_CODE (insn) == JUMP_INSN
		  && GET_CODE (PATTERN (insn)) != RETURN
		  && (! condjump_p (insn)
		      || (JUMP_LABEL (insn) != 0
			  && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
			      || INSN_UID (insn) >= max_uid_for_loop
			      || (INSN_LUID (JUMP_LABEL (insn))
				  < INSN_LUID (insn))))))
		{
		  maybe_multiple = 1;
		  break;
		}
	    }
	}

      /* Past a label or a jump, we get to insns for which we can't count
	 on whether or how many times they will be executed during each
	 iteration.  */
      /* This code appears in three places, once in scan_loop, and twice
	 in strength_reduce.  */
      if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	  /* If we enter the loop in the middle, and scan around to the
	     beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	not_every_iteration = 1;

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  This can also happen if we
	 have just passed a jump, for example, when there are nested loops.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Scan loop_iv_list to remove all regs that proved not to be bivs.
     Make a sanity check against n_times_set.  */
  for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
    {
      if (reg_iv_type[bl->regno] != BASIC_INDUCT
	  /* Above happens if register modified by subreg, etc.  */
	  /* Make sure it is not recognized as a basic induction var: */
	  || n_times_set[bl->regno] != bl->biv_count
	  /* If never incremented, it is invariant that we decided not to
	     move.  So leave it alone.  */
	  || ! bl->incremented)
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
		     bl->regno,
		     (reg_iv_type[bl->regno] != BASIC_INDUCT
		      ? "not induction variable"
		      : (! bl->incremented ? "never incremented"
			 : "count error")));

	  reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
	  *backbl = bl->next;
	}
      else
	{
	  backbl = &bl->next;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
	}
    }

  /* Exit if there are no bivs.  */
  if (! loop_iv_list)
    {
      /* Can still unroll the loop anyways, but indicate that there is no
	 strength reduction info available.  */
      if (flag_unroll_loops)
	unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);

      return;
    }
  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  call_seen = 0;
  for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
    {
      note_insn = p;

      if (GET_CODE (p) == CALL_INSN)
	call_seen = 1;

      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	note_stores (PATTERN (p), record_initial);

      /* Record any test of a biv that branches around the loop if no store
	 between it and the start of loop.  We only care about tests with
	 constants and registers and only certain of those.  */
      if (GET_CODE (p) == JUMP_INSN
	  && JUMP_LABEL (p) != 0
	  && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
	  && (test = get_condition_for_loop (p)) != 0
	  && GET_CODE (XEXP (test, 0)) == REG
	  && REGNO (XEXP (test, 0)) < max_reg_before_loop
	  && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
	  && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
	  && bl->init_insn == 0)
	{
	  /* If an NE test, we have an initial value!  */
	  if (GET_CODE (test) == NE)
	    {
	      bl->init_insn = p;
	      bl->init_set = gen_rtx (SET, VOIDmode,
				      XEXP (test, 0), XEXP (test, 1));
	    }
	  else
	    bl->initial_test = test;
	}
    }
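
  /* For example (hypothetical): if the insn just before the loop is a
     conditional jump past loop_end on `i != 0', then falling into the
     loop implies i == 0, so the NE case above records
     (set (reg i) (const_int 0)) as bl->init_set; any other comparison
     code is only remembered in bl->initial_test.  */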
  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  (This is done
     in two passes to avoid missing SETs in a PARALLEL.)  */
  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      rtx src;

      if (! bl->init_insn)
	continue;

      src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Biv %d initialized at insn %d: initial value ",
		 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
	   || GET_MODE (src) == VOIDmode)
	  && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
	{
	  bl->initial_value = src;

	  if (loop_dump_stream)
	    {
	      if (GET_CODE (src) == CONST_INT)
		fprintf (loop_dump_stream, "%d\n", INTVAL (src));
	      else
		{
		  print_rtl (loop_dump_stream, src);
		  fprintf (loop_dump_stream, "\n");
		}
	    }
	}
      else
	{
	  /* Biv initial value is not simple move,
	     so let it keep initial value of "itself".  */

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "is complex\n");
	}
    }
  /* Search the loop for general induction variables.  */

  /* A register is a giv if: it is only set once, it is a function of a
     biv and a constant (or invariant), and it is not a biv.  */

  not_every_iteration = 0;
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      /* Look for a general induction variable in a register.  */
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  rtx src_reg;
	  rtx add_val;
	  rtx mult_val;
	  int benefit;
	  rtx regnote = 0;

	  dest_reg = SET_DEST (set);
	  if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
	    continue;

	  if (/* SET_SRC is a giv.  */
	      ((benefit = general_induction_var (SET_SRC (set),
						 &src_reg, &add_val,
						 &mult_val))
	       /* Equivalent expression is a giv.  */
	       || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
		   && (benefit = general_induction_var (XEXP (regnote, 0),
							&src_reg,
							&add_val, &mult_val))))
	      /* Don't try to handle any regs made by loop optimization.
		 We have nothing on them in regno_first_uid, etc.  */
	      && REGNO (dest_reg) < max_reg_before_loop
	      /* Don't recognize a BASIC_INDUCT_VAR here.  */
	      && dest_reg != src_reg
	      /* This must be the only place where the register is set.  */
	      && (n_times_set[REGNO (dest_reg)] == 1
		  /* or all sets must be consecutive and make a giv.  */
		  || (benefit = consec_sets_giv (benefit, p,
						 src_reg, dest_reg,
						 &add_val, &mult_val))))
	    {
	      int count;
	      struct induction *v
		= (struct induction *) alloca (sizeof (struct induction));
	      rtx temp;

	      /* If this is a library call, increase benefit.  */
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		benefit += libcall_benefit (p);

	      /* Skip the consecutive insns, if there are any.  */
	      for (count = n_times_set[REGNO (dest_reg)] - 1;
		   count > 0; count--)
		{
		  /* If first insn of libcall sequence, skip to end.
		     Do this at start of loop, since INSN is guaranteed to
		     be an insn here.  */
		  if (GET_CODE (p) != NOTE
		      && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
		    p = XEXP (temp, 0);

		  do p = NEXT_INSN (p);
		  while (GET_CODE (p) == NOTE);
		}

	      record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
			  DEST_REG, not_every_iteration, NULL_PTR, loop_start,
			  loop_end);
	    }
	}
#ifndef DONT_REDUCE_ADDR
      /* Look for givs which are memory addresses.  */
      /* This resulted in worse code on a VAX 8600.  I wonder if it
	 still does.  */
      if (GET_CODE (p) == INSN)
	find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
		       loop_end);
#endif

      /* Update the status of whether giv can derive other givs.  This can
	 change when we pass a label or an insn that updates a biv.  */
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CODE_LABEL)
	update_giv_derive (p);
      /* Past a label or a jump, we get to insns for which we can't count
	 on whether or how many times they will be executed during each
	 iteration.  */
      /* This code appears in three places, once in scan_loop, and twice
	 in strength_reduce.  */
      if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	  /* If we enter the loop in the middle, and scan around
	     to the beginning, don't set not_every_iteration for that.
	     This can be any kind of jump, since we want to know if insns
	     will be executed if the loop is executed.  */
	  && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		&& ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
		    || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
	not_every_iteration = 1;

      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed each iteration: logically, the loop begins here
	     even though the exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    not_every_iteration = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
	 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
	 or not an insn is known to be executed each iteration of the
	 loop, whether or not any iterations are known to occur.

	 Therefore, if we have just passed a label and have no more labels
	 between here and the test insn of the loop, we know these insns
	 will be executed each iteration.  */

      if (not_every_iteration && GET_CODE (p) == CODE_LABEL
	  && no_labels_between_p (p, loop_end))
	not_every_iteration = 0;
    }
  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */

  loop_n_iterations = loop_iterations (loop_start, loop_end);

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->replaceable && ! v->not_replaceable)
	  check_final_value (v, loop_start, loop_end);
    }

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop_end, insn_count, loop_start);
  /* Create reg_map to hold substitutions for replaceable giv regs.  */
  reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
  bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));

  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      struct induction *v;
      int benefit;
      int all_reduced;
      rtx final_value = 0;

      /* Test whether it will be possible to eliminate this biv
	 provided all givs are reduced.  This is possible if either
	 the reg is not used outside the loop, or we can compute
	 what its final value will be.

	 For architectures with a decrement_and_branch_until_zero insn,
	 don't do this if we put a REG_NONNEG note on the endtest for
	 this biv.  */

      /* Compare against bl->init_insn rather than loop_start.
	 We aren't concerned with any uses of the biv between
	 init_insn and loop_start since these won't be affected
	 by the value of the biv elsewhere in the function, so
	 long as init_insn doesn't use the biv itself.
	 March 14, 1989 -- self@bayes.arc.nasa.gov */

      if ((uid_luid[regno_last_uid[bl->regno]] < INSN_LUID (loop_end)
	   && bl->init_insn
	   && INSN_UID (bl->init_insn) < max_uid_for_loop
	   && uid_luid[regno_first_uid[bl->regno]] >= INSN_LUID (bl->init_insn)
#ifdef HAVE_decrement_and_branch_until_zero
	   && ! bl->nonneg
#endif
	   && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
	  || ((final_value = final_biv_value (bl, loop_start, loop_end))
#ifdef HAVE_decrement_and_branch_until_zero
	      && ! bl->nonneg
#endif
	      ))
	bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
					      threshold, insn_count);
      else
	{
	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream,
		       "Cannot eliminate biv %d.\n",
		       bl->regno);
	      fprintf (loop_dump_stream,
		       "First use: insn %d, last use: insn %d.\n",
		       regno_first_uid[bl->regno],
		       regno_last_uid[bl->regno]);
	    }
	}
      /* Combine all giv's for this iv_class.  */
      combine_givs (bl);

      /* This will be true at the end, if all givs which depend on this
	 biv have been strength reduced.
	 We can't (currently) eliminate the biv unless this is so.  */
      all_reduced = 1;

      /* Check each giv in this class to see if we will benefit by reducing
	 it.  Skip giv's combined with others.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->same)
	    continue;

	  benefit = v->benefit;

	  /* Reduce benefit if not replaceable, since we will insert
	     a move-insn to replace the insn that calculates this giv.
	     Don't do this unless the giv is a user variable, since it
	     will often be marked non-replaceable because of the duplication
	     of the exit code outside the loop.  In such a case, the copies
	     we insert are dead and will be deleted.  So they don't have
	     a cost.  Similar situations exist.  */
	  /* ??? The new final_[bg]iv_value code does a much better job
	     of finding replaceable giv's, and hence this code may no longer
	     be necessary.  */
	  if (! v->replaceable && ! bl->eliminable
	      && REG_USERVAR_P (v->dest_reg))
	    benefit -= copy_cost;

	  /* Decrease the benefit to count the add-insns that we will
	     insert to increment the reduced reg for the giv.  */
	  benefit -= add_cost * bl->biv_count;

	  /* Decide whether to strength-reduce this giv or to leave the code
	     unchanged (recompute it from the biv each time it is used).
	     This decision can be made independently for each giv.  */

	  /* ??? Perhaps attempt to guess whether autoincrement will handle
	     some of the new add insns; if so, can increase BENEFIT
	     (undo the subtraction of add_cost that was done above).  */

	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit, otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     of such giv's whether or not we know they are used after the loop
	     exit.  */

	  if (v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
	      v->ignore = 1;
	      all_reduced = 0;
	    }
	  else
	    {
	      /* Check that we can increment the reduced giv without a
		 multiply insn.  If not, reject it.  */

	      for (tv = bl->biv; tv; tv = tv->next_iv)
		if (tv->mult_val == const1_rtx
		    && ! product_cheap_p (tv->add_val, v->mult_val))
		  {
		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "giv of insn %d: would need a multiply.\n",
			       INSN_UID (v->insn));
		    v->ignore = 1;
		    all_reduced = 0;
		    break;
		  }
	    }
	}
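
      /* A worked example of the "not worth while" test above
	 (hypothetical numbers): with no call in the loop and
	 n_non_fixed_regs == 13, threshold is 2 * (3 + 13) == 32; a giv
	 with lifetime 2 and net benefit 1 scores 2 * 32 * 1 == 64 and
	 is kept only when the loop has at most 64 insns.  Larger loops
	 make each saved register more expensive.  */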
      /* Reduce each giv that we decided to reduce.  */

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;
	  if (! v->ignore && v->same == 0)
	    {
	      v->new_reg = gen_reg_rtx (v->mode);

	      /* For each place where the biv is incremented,
		 add an insn to increment the new, reduced reg for the giv.  */
	      for (tv = bl->biv; tv; tv = tv->next_iv)
		{
		  if (tv->mult_val == const1_rtx)
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->new_reg, v->new_reg, tv->insn);
		  else /* tv->mult_val == const0_rtx */
		    /* A multiply is acceptable here
		       since this is presumed to be seldom executed.  */
		    emit_iv_add_mult (tv->add_val, v->mult_val,
				      v->add_val, v->new_reg, tv->insn);
		}

	      /* Add code at loop start to initialize giv's reduced reg.  */

	      emit_iv_add_mult (bl->initial_value, v->mult_val,
				v->add_val, v->new_reg, loop_start);
	    }
	}
      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
	 as not reduced.

	 For each giv register that can be reduced now: if replaceable,
	 substitute reduced reg wherever the old giv occurs;
	 else add new move insn "giv_reg = reduced_reg".

	 Also check for givs whose first use is their definition and whose
	 last use is the definition of another giv.  If so, it is likely
	 dead and should not be used to eliminate a biv.  */
      for (v = bl->giv; v; v = v->next_iv)
	{
	  if (v->same && v->same->ignore)
	    v->ignore = 1;

	  if (v->ignore)
	    continue;

	  if (v->giv_type == DEST_REG
	      && regno_first_uid[REGNO (v->dest_reg)] == INSN_UID (v->insn))
	    {
	      struct induction *v1;

	      for (v1 = bl->giv; v1; v1 = v1->next_iv)
		if (regno_last_uid[REGNO (v->dest_reg)] == INSN_UID (v1->insn))
		  v->maybe_dead = 1;
	    }

	  /* Update expression if this was combined, in case other giv was
	     replaced.  */
	  if (v->same)
	    v->new_reg = replace_rtx (v->new_reg,
				      v->same->dest_reg, v->same->new_reg);

	  if (v->giv_type == DEST_ADDR)
	    /* Store reduced reg as the address in the memref where we found
	       this giv.  */
	    *v->location = v->new_reg;
	  else if (v->replaceable)
	    {
	      reg_map[REGNO (v->dest_reg)] = v->new_reg;

#if 0
	      /* I can no longer duplicate the original problem.  Perhaps
		 this is unnecessary now?  */

	      /* Replaceable; it isn't strictly necessary to delete the old
		 insn and emit a new one, because v->dest_reg is now dead.

		 However, especially when unrolling loops, the special
		 handling for (set REG0 REG1) in the second cse pass may
		 make v->dest_reg live again.  To avoid this problem, emit
		 an insn to set the original giv reg from the reduced giv.
		 We can not delete the original insn, since it may be part
		 of a LIBCALL, and the code in flow that eliminates dead
		 libcalls will fail if it is deleted.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
#endif
	    }
	  else
	    {
	      /* Not replaceable; emit an insn to set the original giv reg from
		 the reduced giv, same as above.  */
	      emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
			       v->insn);
	    }

	  /* When a loop is reversed, givs which depend on the reversed
	     biv, and which are live outside the loop, must be set to their
	     correct final value.  This insn is only needed if the giv is
	     not replaceable.  The correct final value is the same as the
	     value that the giv starts the reversed loop with.  */
	  if (bl->reversed && ! v->replaceable)
	    emit_iv_add_mult (bl->initial_value, v->mult_val,
			      v->add_val, v->dest_reg, end_insert_before);
	  else if (v->final_value)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the loop,
		 since this is slightly more efficient.  */
	      if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;
	      emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
				insert_before);

	      /* If the insn to set the final value of the giv was emitted
		 before the loop, then we must delete the insn inside the loop
		 that sets it.  If this is a LIBCALL, then we must delete
		 every insn in the libcall.  Note, however, that
		 final_giv_value will only succeed when there are multiple
		 exits if the giv is dead at each exit, hence it does not
		 matter that the original insn remains because it is dead
		 anyway.  */
	      /* Delete the insn inside the loop that sets the giv since
		 the giv is now set before (or after) the loop.  */
	      delete_insn (v->insn);
	    }

	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream, "giv at %d reduced to ",
		       INSN_UID (v->insn));
	      print_rtl (loop_dump_stream, v->new_reg);
	      fprintf (loop_dump_stream, "\n");
	    }
	}
      /* All the givs based on the biv bl have been reduced if they
	 merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
	 second giv, clear any "maybe dead" mark on that second giv.
	 v->new_reg will either be or refer to the register of the giv it
	 combined with.

	 Doing this clearing avoids problems in biv elimination where a
	 giv's new_reg is a complex value that can't be put in the insn but
	 the giv combined with (with a reg as new_reg) is marked maybe_dead.
	 Since the register will be used in either case, we'd prefer it be
	 used from the simpler giv.  */

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->maybe_dead && v->same)
	  v->same->maybe_dead = 0;
      /* Try to eliminate the biv, if it is a candidate.
	 This won't work if ! all_reduced,
	 since the givs we planned to use might not have been reduced.

	 We have to be careful that we didn't initially think we could eliminate
	 this biv because of a giv that we now think may be dead and shouldn't
	 be used as a biv replacement.

	 Also, there is the possibility that we may have a giv that looks
	 like it can be used to eliminate a biv, but the resulting insn
	 isn't valid.  This can happen, for example, on the 88k, where a
	 JUMP_INSN can compare a register only with zero.  Attempts to
	 replace it with a compare with a constant will fail.

	 Note that in cases where this call fails, we may have replaced some
	 of the occurrences of the biv with a giv, but no harm was done in
	 doing so in the rare cases where it can occur.  */

      if (all_reduced == 1 && bl->eliminable
	  && maybe_eliminate_biv (bl, loop_start, end, 1,
				  threshold, insn_count))
	{
	  /* ?? If we created a new test to bypass the loop entirely,
	     or otherwise drop straight in, based on this test, then
	     we might want to rewrite it also.  This way some later
	     pass has more hope of removing the initialization of this
	     biv entirely.  */

	  /* If final_value != 0, then the biv may be used after loop end
	     and we must emit an insn to set it just in case.

	     Reversed bivs already have an insn after the loop setting their
	     value, so we don't need another one.  We can't calculate the
	     proper final value for such a biv here anyways.  */
	  if (final_value != 0 && ! bl->reversed)
	    {
	      rtx insert_before;

	      /* If the loop has multiple exits, emit the insn before the
		 loop to ensure that it will always be executed no matter
		 how the loop exits.  Otherwise, emit the insn after the
		 loop, since this is slightly more efficient.  */
	      if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
		insert_before = loop_start;
	      else
		insert_before = end_insert_before;

	      emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
				insert_before);
	    }

#if 0
	  /* Delete all of the instructions inside the loop which set
	     the biv, as they are all dead.  It is safe to delete them,
	     because an insn setting a biv will never be part of a libcall.  */
	  /* However, deleting them will invalidate the regno_last_uid info,
	     so keeping them around is more convenient.  Final_biv_value
	     will only succeed when there are multiple exits if the biv
	     is dead at each exit, hence it does not matter that the original
	     insn remains, because it is dead anyways.  */
	  for (v = bl->biv; v; v = v->next_iv)
	    delete_insn (v->insn);
#endif

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
		     bl->regno);
	}
    }
  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	|| GET_CODE (p) == CALL_INSN)
      {
	replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
	replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
      }

  /* Unroll loops from within strength reduction so that we can use the
     induction variable information that strength_reduce has already
     collected.  */
  if (flag_unroll_loops)
    unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

static int
valid_initial_value_p (x, insn, call_seen, loop_start)
     rtx x;
     rtx insn;
     int call_seen;
     rtx loop_start;
{
  if (CONSTANT_P (x))
    return 1;

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (GET_CODE (x) != REG
      || REGNO (x) >= max_reg_before_loop)
    return 0;

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
#ifndef SMALL_REGISTER_CLASSES
      && call_used_regs[REGNO (x)] && call_seen
#endif
      )
    return 0;

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
    return 0;

  return 1;
}
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  */

static void
find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
     rtx x;
     rtx insn;
     int not_every_iteration;
     rtx loop_start, loop_end;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case USE:
    case CLOBBER:
      return;

    case MEM:
      {
	rtx src_reg;
	rtx add_val;
	rtx mult_val;
	int benefit;

	benefit = general_induction_var (XEXP (x, 0),
					 &src_reg, &add_val, &mult_val);

	/* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
	   Such a giv isn't useful.  */
	if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
	  {
	    /* Found one; record it.  */
	    struct induction *v
	      = (struct induction *) oballoc (sizeof (struct induction));

	    record_giv (v, insn, src_reg, addr_placeholder, mult_val,
			add_val, benefit, DEST_ADDR, not_every_iteration,
			&XEXP (x, 0), loop_start, loop_end);

	    v->mem_mode = GET_MODE (x);
	  }
	return;
      }
    }

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
		     loop_end);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
		       loop_start, loop_end);
}
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */
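/* For example (illustrative only): for the update `i = i + 4' we are
   called with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
   while for the plain assignment `i = n' (with `n' invariant) we are
   called with MULT_VAL == const0_rtx and INC_VAL == the rtx for `n'.  */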
static void
record_biv (v, insn, dest_reg, inc_val, mult_val,
	    not_every_iteration, maybe_multiple)
     struct induction *v;
     rtx insn;
     rtx dest_reg;
     rtx inc_val;
     rtx mult_val;
     int not_every_iteration;
     int maybe_multiple;
{
  struct iv_class *bl;

  v->insn = insn;
  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = reg_biv_class[REGNO (dest_reg)];
  if (bl == 0)
    {
      /* Create and initialize new iv_class.  */

      bl = (struct iv_class *) oballoc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);
      bl->biv = 0;
      bl->giv = 0;
      bl->biv_count = 0;
      bl->giv_count = 0;

      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      /* We haven't seen the initializing insn yet.  */
      bl->init_insn = 0;
      bl->init_set = 0;
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->eliminable = 0;
      bl->nonneg = 0;
      bl->reversed = 0;
      bl->total_benefit = 0;

      /* Add this class to loop_iv_list.  */
      bl->next = loop_iv_list;
      loop_iv_list = bl;

      /* Put it in the array of biv register classes.  */
      reg_biv_class[REGNO (dest_reg)] = bl;
    }

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;
  bl->biv = v;
  bl->biv_count++;
  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream,
	       "Insn %d: possible biv, reg %d,",
	       INSN_UID (insn), REGNO (dest_reg));
      if (GET_CODE (inc_val) == CONST_INT)
	fprintf (loop_dump_stream, " const = %d\n",
		 INTVAL (inc_val));
      else
	{
	  fprintf (loop_dump_stream, " const = ");
	  print_rtl (loop_dump_stream, inc_val);
	  fprintf (loop_dump_stream, "\n");
	}
    }
}
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */
static void
record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
	    type, not_every_iteration, location, loop_start, loop_end)
     struct induction *v;
     rtx insn;
     rtx src_reg;
     rtx dest_reg;
     rtx mult_val, add_val;
     int benefit;
     enum g_types type;
     int not_every_iteration;
     rtx *location;
     rtx loop_start, loop_end;
{
  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);
  rtx p;

  v->insn = insn;
  v->src_reg = src_reg;
  v->giv_type = type;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->benefit = benefit;
  v->location = location;
  v->cant_derive = 0;
  v->combined_with = 0;
  v->maybe_multiple = 0;
  v->maybe_dead = 0;
  v->derive_adjustment = 0;
  v->same = 0;
  v->ignore = 0;
  v->new_reg = 0;
  v->final_value = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */
  if (type == DEST_ADDR)
    v->always_computable = 1;
  else
    v->always_computable = ! not_every_iteration;

  if (type == DEST_ADDR)
    {
      v->mode = GET_MODE (*location);
      v->lifetime = 1;
      v->times_used = 1;
    }
  else /* type == DEST_REG */
    {
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = (uid_luid[regno_last_uid[REGNO (dest_reg)]]
		     - uid_luid[regno_first_uid[REGNO (dest_reg)]]);

      v->times_used = n_times_used[REGNO (dest_reg)];

      /* If the lifetime is zero, it means that this register is
	 really a dead store.  So mark this as a giv that can be
	 ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)
	v->ignore = 1;

      reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
      reg_iv_info[REGNO (dest_reg)] = v;
    }
  /* Add the giv to the class of givs computed from one biv.  */

  bl = reg_biv_class[REGNO (src_reg)];
  if (bl)
    {
      v->next_iv = bl->giv;
      bl->giv = v;
      /* Don't count DEST_ADDR.  This is supposed to count the number of
	 insns that calculate givs.  */
      if (type == DEST_REG)
	bl->giv_count++;
      bl->total_benefit += benefit;
    }
  else
    /* Fatal error, biv missing for this giv?  */
    abort ();

  if (type == DEST_ADDR)
    v->replaceable = 1;
  else
    {
      /* The giv can be replaced outright by the reduced register only if all
	 of the following conditions are true:
	 - the insn that sets the giv is always executed on any iteration
	   on which the giv is used at all
	   (there are two ways to deduce this:
	    either the insn is executed on every iteration,
	    or all uses follow that insn in the same basic block),
	 - the giv is not used outside the loop
	 - no assignments to the biv occur during the giv's lifetime.  */
      if (regno_first_uid[REGNO (dest_reg)] == INSN_UID (insn)
	  /* Previous line always fails if INSN was moved by loop opt.  */
	  && uid_luid[regno_last_uid[REGNO (dest_reg)]] < INSN_LUID (loop_end)
	  && (! not_every_iteration
	      || last_use_this_basic_block (dest_reg, insn)))
	{
	  /* Now check that there are no assignments to the biv within the
	     giv's lifetime.  This requires two separate checks.  */

	  /* Check each biv update, and fail if any are between the first
	     and last use of the giv.

	     If this loop contains an inner loop that was unrolled, then
	     the insn modifying the biv may have been emitted by the loop
	     unrolling code, and hence does not have a valid luid.  Just
	     mark the biv as not replaceable in this case.  It is not very
	     useful as a biv, because it is used in two different loops.
	     It is very unlikely that we would be able to optimize the giv
	     using this biv anyway.  */

	  v->replaceable = 1;
	  for (b = bl->biv; b; b = b->next_iv)
	    {
	      if (INSN_UID (b->insn) >= max_uid_for_loop
		  || ((uid_luid[INSN_UID (b->insn)]
		       >= uid_luid[regno_first_uid[REGNO (dest_reg)]])
		      && (uid_luid[INSN_UID (b->insn)]
			  <= uid_luid[regno_last_uid[REGNO (dest_reg)]])))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	    }
	  /* Check each insn between the first and last use of the giv,
	     and fail if any of them are branches that jump to a named label
	     outside this range, but still inside the loop.  This catches
	     cases of spaghetti code where the execution order of insns
	     is not linear, and hence the above test fails.  For example,
	     in the following code, j is not replaceable:
		   for (i = 0; i < 100; )	{
	     L0:	j = 4*i; goto L1;
	     L2:	k = j;	 goto L3;
	     L1:	i++;	 goto L2;
	     L3:	;	 if (i < 100) goto L0; }
		   printf ("k = %d\n", k); }
	     This test is conservative, but this test succeeds rarely enough
	     that it isn't a problem.  See also check_final_value below.  */

	  if (v->replaceable)
	    for (p = insn;
		 INSN_UID (p) >= max_uid_for_loop
		 || INSN_LUID (p) < uid_luid[regno_last_uid[REGNO (dest_reg)]];
		 p = NEXT_INSN (p))
	      {
		if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		    && LABEL_NAME (JUMP_LABEL (p))
		    && ((INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)
			 && (INSN_LUID (JUMP_LABEL (p))
			     < uid_luid[regno_first_uid[REGNO (dest_reg)]]))
			|| (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)
			    && (INSN_LUID (JUMP_LABEL (p))
				> uid_luid[regno_last_uid[REGNO (dest_reg)]]))))
		  {
		    v->replaceable = 0;
		    v->not_replaceable = 1;

		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "Found branch outside giv lifetime.\n");

		    break;
		  }
	      }
	}
      else
	{
	  /* May still be replaceable, we don't have enough info here to
	     decide.  */
	  v->replaceable = 0;
	  v->not_replaceable = 0;
	}
    }
  if (loop_dump_stream)
    {
      if (type == DEST_REG)
	fprintf (loop_dump_stream, "Insn %d: giv reg %d",
		 INSN_UID (insn), REGNO (dest_reg));
      else
	fprintf (loop_dump_stream, "Insn %d: dest address",
		 INSN_UID (insn));

      fprintf (loop_dump_stream, " src reg %d benefit %d",
	       REGNO (src_reg), v->benefit);
      fprintf (loop_dump_stream, " used %d lifetime %d",
	       v->times_used, v->lifetime);

      if (v->replaceable)
	fprintf (loop_dump_stream, " replaceable");

      if (GET_CODE (mult_val) == CONST_INT)
	fprintf (loop_dump_stream, " mult %d",
		 INTVAL (mult_val));
      else
	{
	  fprintf (loop_dump_stream, " mult ");
	  print_rtl (loop_dump_stream, mult_val);
	}

      if (GET_CODE (add_val) == CONST_INT)
	fprintf (loop_dump_stream, " add %d",
		 INTVAL (add_val));
      else
	{
	  fprintf (loop_dump_stream, " add ");
	  print_rtl (loop_dump_stream, add_val);
	}
    }

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");
}
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code cannot be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that cannot be accurately calculated until after all givs
   have been identified.  */
static void
check_final_value (v, loop_start, loop_end)
     struct induction *v;
     rtx loop_start, loop_end;
{
  struct iv_class *bl;
  rtx final_value = 0;

  bl = reg_biv_class[REGNO (v->src_reg)];

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
	either the insn is executed on every iteration,
	or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - no assignments to the biv occur during the giv's lifetime.  */

  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */
  v->replaceable = 0;

  if ((final_value = final_giv_value (v, loop_start, loop_end))
      && (v->always_computable
	  || last_use_this_basic_block (v->dest_reg, v->insn)))
    {
      int biv_increment_seen = 0;
      rtx p = v->insn;
      rtx last_giv_use;

      v->replaceable = 1;

      /* When trying to determine whether or not a biv increment occurs
	 during the lifetime of the giv, we can ignore uses of the variable
	 outside the loop because final_value is true.  Hence we can not
	 use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
	 biv occur during the giv's lifetime.  Start with the insn
	 that sets the giv, and search around the loop until we come
	 back to that insn again.

	 Also fail if there is a jump within the giv's lifetime that jumps
	 to somewhere outside the lifetime but still within the loop.  This
	 catches spaghetti code where the execution order is not linear, and
	 hence the above test fails.  Here we assume that the giv lifetime
	 does not extend from one iteration of the loop to the next, so as
	 to make the test easier.  Since the lifetime isn't known yet,
	 this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

      while (1)
	{
	  p = NEXT_INSN (p);
	  if (p == loop_end)
	    p = NEXT_INSN (loop_start);
	  if (p == v->insn)
	    break;

	  if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	      || GET_CODE (p) == CALL_INSN)
	    {
	      if (biv_increment_seen)
		{
		  if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		    {
		      v->replaceable = 0;
		      v->not_replaceable = 1;
		      break;
		    }
		}
	      else if (GET_CODE (PATTERN (p)) == SET
		       && SET_DEST (PATTERN (p)) == v->src_reg)
		biv_increment_seen = 1;
	      else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		last_giv_use = p;
	    }
	}

      /* Now that the lifetime of the giv is known, check for branches
	 from within the lifetime to outside the lifetime if it is still
	 replaceable.  */

      if (v->replaceable)
	{
	  p = v->insn;
	  while (1)
	    {
	      p = NEXT_INSN (p);
	      if (p == loop_end)
		p = NEXT_INSN (loop_start);
	      if (p == last_giv_use)
		break;

	      if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
		  && LABEL_NAME (JUMP_LABEL (p))
		  && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
		       && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
		      || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
			  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream,
			     "Found branch outside giv lifetime.\n");

		  break;
		}
	    }
	}

      /* If it is replaceable, then save the final value.  */
      if (v->replaceable)
	v->final_value = final_value;
    }

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
	     INSN_UID (v->insn), REGNO (v->dest_reg));
}
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

static void
update_giv_derive (p)
     rtx p;
{
  struct iv_class *bl;
  struct induction *biv, *giv;
  rtx tem;
  int dummy;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */
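/* A sketch of the adjustment (illustrative numbers): suppose giv g is
   set as g = 4*b + 1 and the biv is then incremented by `b = b + 2'.
   A giv derived from g after that point sees the updated biv, for which
   g = 4*(b - 2) + 1, so derive_adjustment accumulates
   biv->add_val * giv->mult_val = 2 * 4 = 8 as the needed compensation.  */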
  for (bl = loop_iv_list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
	  || biv->insn == p)
	{
	  for (giv = bl->giv; giv; giv = giv->next_iv)
	    {
	      /* If cant_derive is already true, there is no point in
		 checking all of these conditions again.  */
	      if (giv->cant_derive)
		continue;

	      /* If this giv is conditionally set and we have passed a label,
		 it cannot derive anything.  */
	      if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
		giv->cant_derive = 1;

	      /* Skip givs that have mult_val == 0, since
		 they are really invariants.  Also skip those that are
		 replaceable, since we know their lifetime doesn't contain
		 any biv update.  */
	      else if (giv->mult_val == const0_rtx || giv->replaceable)
		continue;

	      /* The only way we can allow this giv to derive another
		 is if this is a biv increment and we can form the product
		 of biv->add_val and giv->mult_val.  In this case, we will
		 be able to compute a compensation.  */
	      else if (biv->insn == p)
		{
		  tem = 0;

		  if (biv->mult_val == const1_rtx)
		    tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
						      biv->add_val,
						      giv->mult_val),
					     &dummy);

		  if (tem && giv->derive_adjustment)
		    tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
						      giv->derive_adjustment),
					     &dummy);
		  if (tem)
		    giv->derive_adjustment = tem;
		  else
		    giv->cant_derive = 1;
		}
	      else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
		       || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
		giv->cant_derive = 1;
	    }
	}
}
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   and store the additive term into *INC_VAL.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted via PROMOTED_MODE.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */
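/* For example (illustrative only): for the insn `i = i + 1', X is
   (plus (reg i) (const_int 1)); we return 1 with *MULT_VAL == const1_rtx
   and *INC_VAL == (const_int 1).  For `i = n' with `n' invariant and
   this the innermost loop, *MULT_VAL is const0_rtx and *INC_VAL is `n'.  */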
static int
basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
     register rtx x;
     enum machine_mode mode;
     rtx dest_reg;
     rtx p;
     rtx *inc_val;
     rtx *mult_val;
{
  register enum rtx_code code;
  rtx arg;
  rtx insn, set = 0;

  code = GET_CODE (x);
  switch (code)
    {
    case PLUS:
      if (XEXP (x, 0) == dest_reg
	  || (GET_CODE (XEXP (x, 0)) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
	      && SUBREG_REG (XEXP (x, 0)) == dest_reg))
	arg = XEXP (x, 1);
      else if (XEXP (x, 1) == dest_reg
	       || (GET_CODE (XEXP (x, 1)) == SUBREG
		   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
		   && SUBREG_REG (XEXP (x, 1)) == dest_reg))
	arg = XEXP (x, 0);
      else
	return 0;

      if (invariant_p (arg) != 1)
	return 0;

      *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      *mult_val = const1_rtx;
      return 1;

    case SUBREG:
      /* If this is a SUBREG for a promoted variable, check the inner
	 value.  */
      if (SUBREG_PROMOTED_VAR_P (x))
	return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
				    dest_reg, p, inc_val, mult_val);
      return 0;

    case REG:
      /* If this register is assigned in the previous insn, look at its
	 source, but don't go outside the loop or past a label.  */

      for (insn = PREV_INSN (p);
	   (insn && GET_CODE (insn) == NOTE
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (set != 0 && SET_DEST (set) == x)
	return basic_induction_var (SET_SRC (set),
				    (GET_MODE (SET_SRC (set)) == VOIDmode
				     ? GET_MODE (x)
				     : GET_MODE (SET_SRC (set))),
				    dest_reg, insn, inc_val, mult_val);
      /* ... fall through ...  */

      /* Can accept constant setting of biv only when inside innermost loop.
	 Otherwise, a biv of an inner loop may be incorrectly recognized
	 as a biv of the outer loop,
	 causing code to be moved INTO the inner loop.  */
    case MEM:
    case CONST_INT:
    case SYMBOL_REF:
    case CONST:
      if (invariant_p (x) != 1)
	return 0;

      if (loops_enclosed == 1)
	{
	  /* Possible bug here?  Perhaps we don't know the mode of X.  */
	  *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
	  *mult_val = const0_rtx;
	  return 1;
	}
      else
	return 0;

    case SIGN_EXTEND:
      return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
				  dest_reg, p, inc_val, mult_val);

    case ASHIFTRT:
      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
	   (insn && GET_CODE (insn) == NOTE
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (set && SET_DEST (set) == XEXP (x, 0)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && GET_CODE (SET_SRC (set)) == ASHIFT
	  && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
	return basic_induction_var (XEXP (SET_SRC (set), 0),
				    GET_MODE (XEXP (x, 0)),
				    dest_reg, insn, inc_val, mult_val);
      return 0;

    default:
      return 0;
    }
}
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */
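/* For illustration (an assumed source fragment): in

	for (i = 0; i < n; i++)
	  total += a[i];

   `i' is a biv, and on a machine with 4-byte ints the address computed
   for `a[i]', something like (plus (mult (reg i) (const_int 4)) (reg a)),
   is a giv with mult_val == 4 and add_val == the address of `a'.  */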
/* Determine whether X computes a giv.
   If it does, return a nonzero value
   which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
   such that the value of X is biv * mult + add;  */
static int
general_induction_var (x, src_reg, add_val, mult_val)
     rtx x;
     rtx *src_reg;
     rtx *add_val;
     rtx *mult_val;
{
  rtx orig_x = x;
  char *storage;
  int benefit = 0;

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (invariant_p (x) == 1)
    return 0;

  /* See if the expression could be a giv and get its form.
     Mark our place on the obstack in case we don't find a giv.  */
  storage = (char *) oballoc (0);
  x = simplify_giv_expr (x, &benefit);
  if (x == 0)
    return 0;

  switch (GET_CODE (x))
    {
    case USE:
    case CONST_INT:
      /* Since this is now an invariant and wasn't before, it must be a giv
	 with MULT_VAL == 0.  It doesn't matter which BIV we associate this
	 with.  */
      *src_reg = loop_iv_list->biv->dest_reg;
      *mult_val = const0_rtx;
      *add_val = x;
      break;

    case REG:
      /* This is equivalent to a BIV.  */
      *src_reg = x;
      *mult_val = const1_rtx;
      *add_val = const0_rtx;
      break;

    case PLUS:
      /* Either (plus (biv) (invar)) or
	 (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  *src_reg = XEXP (XEXP (x, 0), 0);
	  *mult_val = XEXP (XEXP (x, 0), 1);
	}
      else
	{
	  *src_reg = XEXP (x, 0);
	  *mult_val = const1_rtx;
	}
      *add_val = XEXP (x, 1);
      break;

    case MULT:
      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;
      break;

    default:
      abort ();
    }

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
     some, unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  benefit += rtx_cost (orig_x, SET);

  /* Always return some benefit if this is a giv so it will be detected
     as such.  This allows elimination of bivs that might otherwise
     not be eliminated.  */
  return benefit == 0 ? 1 : benefit;
}
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
	(plus (mult (BIV) (invar_1))
	      (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a non-zero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */
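/* For example (illustrative only): given X = (i + 2) * 3 with `i' a biv,
   the canonical result is (plus (mult (reg i) (const_int 3)) (const_int 6));
   given X = -i, it is (mult (reg i) (const_int -1)).  */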
static rtx
simplify_giv_expr (x, benefit)
     rtx x;
     int *benefit;
{
  enum machine_mode mode = GET_MODE (x);
  rtx arg0, arg1;
  rtx tem;

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
	  || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
    return 0;

  switch (GET_CODE (x))
    {
    case PLUS:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
	   || GET_CODE (arg0) == CONST_INT)
	  && GET_CODE (arg1) != CONST_INT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */
      if (arg1 == const0_rtx)
	return arg0;
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
	switch (GET_CODE (arg0))
	  {
	  case CONST_INT:
	  case USE:
	    /* Both invariant.  Only valid if sum is machine operand.
	       First strip off possible USE on first operand.  */
	    if (GET_CODE (arg0) == USE)
	      arg0 = XEXP (arg0, 0);

	    tem = 0;
	    if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
	      {
		tem = plus_constant (arg0, INTVAL (arg1));
		if (GET_CODE (tem) != CONST_INT)
		  tem = gen_rtx (USE, mode, tem);
	      }

	    return tem;

	  case REG:
	  case MULT:
	    /* biv + invar or mult + invar.  Return sum.  */
	    return gen_rtx (PLUS, mode, arg0, arg1);

	  case PLUS:
	    /* (a + invar_1) + invar_2.  Associate.  */
	    return simplify_giv_expr (gen_rtx (PLUS, mode,
					       XEXP (arg0, 0),
					       gen_rtx (PLUS, mode,
							XEXP (arg0, 1), arg1)),
				      benefit);

	  default:
	    abort ();
	  }
      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
	 MULT to reduce cases.  */
      if (GET_CODE (arg0) == REG)
	arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
      if (GET_CODE (arg1) == REG)
	arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
	 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
	 Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
	return simplify_giv_expr (gen_rtx (PLUS, mode,
					   gen_rtx (PLUS, mode,
						    arg0, XEXP (arg1, 0)),
					   XEXP (arg1, 1)),
				  benefit);

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
	abort ();

      if (XEXP (arg0, 0) != XEXP (arg1, 0))
	return 0;

      return simplify_giv_expr (gen_rtx (MULT, mode,
					 XEXP (arg0, 0),
					 gen_rtx (PLUS, mode,
						  XEXP (arg0, 1),
						  XEXP (arg1, 1))),
				benefit);
    case MINUS:
      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (gen_rtx (PLUS, mode,
					 XEXP (x, 0),
					 gen_rtx (MULT, mode,
						  XEXP (x, 1), constm1_rtx)),
				benefit);
    case MULT:
      arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
      arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
      if (arg0 == 0 || arg1 == 0)
	return 0;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
	  && GET_CODE (arg1) != CONST_INT)
	tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
	return 0;

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
	return const0_rtx;

      else if (arg1 == const1_rtx)
	return arg0;

      switch (GET_CODE (arg0))
	{
	case REG:
	  /* biv * invar.  Done.  */
	  return gen_rtx (MULT, mode, arg0, arg1);

	case CONST_INT:
	  /* Product of two constants.  */
	  return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

	case USE:
	  /* invar * invar.  Not giv.  */
	  return 0;

	case MULT:
	  /* (a * invar_1) * invar_2.  Associate.  */
	  return simplify_giv_expr (gen_rtx (MULT, mode,
					     XEXP (arg0, 0),
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 1), arg1)),
				    benefit);

	case PLUS:
	  /* (a + invar_1) * invar_2.  Distribute.  */
	  return simplify_giv_expr (gen_rtx (PLUS, mode,
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 0), arg1),
					     gen_rtx (MULT, mode,
						      XEXP (arg0, 1), arg1)),
				    benefit);

	default:
	  abort ();
	}
    case ASHIFT:
      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	return 0;

      return simplify_giv_expr (gen_rtx (MULT, mode,
					 XEXP (x, 0),
					 GEN_INT ((HOST_WIDE_INT) 1
						  << INTVAL (XEXP (x, 1)))),
				benefit);

    case NEG:
      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
				benefit);

    case NOT:
      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (gen_rtx (MINUS, mode,
					 gen_rtx (NEG, mode, XEXP (x, 0)),
					 const1_rtx),
				benefit);
5197 /* If this is a new register, we can't deal with it. */
5198 if (REGNO (x
) >= max_reg_before_loop
)
5201 /* Check for biv or giv. */
5202 switch (reg_iv_type
[REGNO (x
)])
5206 case GENERAL_INDUCT
:
5208 struct induction
*v
= reg_iv_info
[REGNO (x
)];
5210 /* Form expression from giv and add benefit. Ensure this giv
5211 can derive another and subtract any needed adjustment if so. */
5212 *benefit
+= v
->benefit
;
5216 tem
= gen_rtx (PLUS
, mode
, gen_rtx (MULT
, mode
,
5217 v
->src_reg
, v
->mult_val
),
5219 if (v
->derive_adjustment
)
5220 tem
= gen_rtx (MINUS
, mode
, tem
, v
->derive_adjustment
);
5221 return simplify_giv_expr (tem
, benefit
);
5225 /* Fall through to general case. */
5227 /* If invariant, return as USE (unless CONST_INT).
5228 Otherwise, not giv. */
5229 if (GET_CODE (x
) == USE
)
5232 if (invariant_p (x
) == 1)
5234 if (GET_CODE (x
) == CONST_INT
)
5237 return gen_rtx (USE
, mode
, x
);
/* Help detect a giv that is calculated by several consecutive insns;
   for example,
	x = biv * m
	x = x + a
   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */
static int
consec_sets_giv (first_benefit, p, src_reg, dest_reg,
		 add_val, mult_val)
     int first_benefit;
     rtx p;
     rtx src_reg;
     rtx dest_reg;
     rtx *add_val;
     rtx *mult_val;
{
  int count;
  enum rtx_code code;
  int benefit;
  rtx temp;
  rtx set;

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  struct induction *v
    = (struct induction *) alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->cant_derive = 0;
  v->derive_adjustment = 0;

  reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
  reg_iv_info[REGNO (dest_reg)] = v;

  count = n_times_set[REGNO (dest_reg)] - 1;

  while (count > 0)
    {
      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
	p = XEXP (temp, 0);

      if (code == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && SET_DEST (set) == dest_reg
	  && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
						add_val, mult_val))
	      /* Giv created by equivalent expression.  */
	      || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
		  && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
						       add_val, mult_val))))
	  && src_reg == v->src_reg)
	{
	  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
	    benefit += libcall_benefit (p);

	  count--;
	  v->mult_val = *mult_val;
	  v->add_val = *add_val;
	  v->benefit = benefit;
	}
      else if (code != NOTE)
	{
	  /* Allow insns that set something other than this giv to a
	     constant.  Such insns are needed on machines which cannot
	     include long constants and should not disqualify a giv.  */
	  if (code == INSN
	      && (set = single_set (p))
	      && SET_DEST (set) != dest_reg
	      && CONSTANT_P (SET_SRC (set)))
	    continue;

	  reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
	  return 0;
	}
    }

  return v->benefit;
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that
	G1 = a * v + b		and
	G2 = c * v + d
   where `v' is the biv.

   So G2 = (c/a) * G1 + (d - b*c/a).  */
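/* Worked example (illustrative numbers): if G1 = 2*v + 3 (a = 2, b = 3)
   and G2 = 6*v + 11 (c = 6, d = 11), then c/a = 3 and b*c/a = 9, so
   G2 = 3 * G1 + (11 - 9) = 3 * G1 + 2.  */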
static rtx
express_from (g1, g2)
     struct induction *g1, *g2;
{
  rtx mult, add;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) != CONST_INT
      || GET_CODE (g2->mult_val) != CONST_INT
      || GET_CODE (g1->add_val) != CONST_INT
      || g1->mult_val == const0_rtx
      || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
    return 0;

  mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
  add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));

  /* Form simplified final result.  */
  if (mult == const0_rtx)
    return add;
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
  else
    mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    return mult;
  else
    return gen_rtx (PLUS, g2->mode, mult, add);
}
/* Return 1 if giv G2 can be combined with G1.  This means that G2 can use
   (either directly or via an address expression) a register used to represent
   G1.  Set g2->new_reg to a representation of G1 (normally just
   g1->dest_reg).  */

static int
combine_givs_p (g1, g2)
     struct induction *g1, *g2;
{
  rtx tem;

  /* If these givs are identical, they can be combined.  */
  if (rtx_equal_p (g1->mult_val, g2->mult_val)
      && rtx_equal_p (g1->add_val, g2->add_val))
    {
      g2->new_reg = g1->dest_reg;
      return 1;
    }

  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (g2->giv_type == DEST_ADDR
      && (tem = express_from (g1, g2)) != 0
      && memory_address_p (g2->mem_mode, tem)
      && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
    {
      g2->new_reg = tem;
      return 1;
    }

  return 0;
}
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

static void
combine_givs (bl)
     struct iv_class *bl;
{
  struct induction *g1, *g2;
  int pass;

  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    for (pass = 0; pass <= 1; pass++)
      for (g2 = bl->giv; g2; g2 = g2->next_iv)
	if (g1 != g2
	    /* First try to combine with replaceable givs, then all givs.  */
	    && (g1->replaceable || pass == 1)
	    /* If either has already been combined or is to be ignored, can't
	       combine.  */
	    && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
	    /* If something has been based on G2, G2 cannot itself be based
	       on something else.  */
	    && ! g2->combined_with
	    && combine_givs_p (g1, g2))
	  {
	    /* g2->new_reg set by `combine_givs_p'  */
	    g2->same = g1;
	    g1->combined_with = 1;
	    g1->benefit += g2->benefit;
	    /* ??? The new final_[bg]iv_value code does a much better job
	       of finding replaceable giv's, and hence this code may no
	       longer be necessary.  */
	    if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
	      g1->benefit -= copy_cost;
	    g1->lifetime += g2->lifetime;
	    g1->times_used += g2->times_used;

	    if (loop_dump_stream)
	      fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
		       INSN_UID (g2->insn), INSN_UID (g1->insn));
	  }
}
/* Emit code before INSERT_BEFORE to set REG = B * M + A.  */
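/* For example (a sketch of the intended use, not a quote of a caller):
   when giving a reduced giv its starting value, strength reduction can
   call

	emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
			  v->new_reg, loop_start);

   to emit `new_reg = initial_value * mult_val + add_val' just before
   the loop.  */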
static void
emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;			/* initial value of basic induction variable */
     rtx m;			/* multiplicative constant */
     rtx a;			/* additive constant */
     rtx reg;			/* destination register */
     rtx insert_before;
{
  rtx seq;
  rtx result;

  /* Prevent unexpected sharing of these rtx.  */
  a = copy_rtx (a);
  b = copy_rtx (b);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  emit_insn_before (seq, insert_before);
}
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.  */
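/* For instance (illustrative): a multiply by the constant 8 normally
   expands to a single shift insn, so it is cheap; a multiply of two
   registers needs a real multiply insn (or a libcall) and is not.  */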
static int
product_cheap_p (a, b)
     rtx a;
     rtx b;
{
  int i;
  rtx tmp;
  struct obstack *old_rtl_obstack = rtl_obstack;
  char *storage = (char *) obstack_alloc (&temp_obstack, 0);
  int win = 1;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  rtl_obstack = &temp_obstack;
  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
  tmp = gen_sequence ();
  end_sequence ();

  if (GET_CODE (tmp) == SEQUENCE)
    {
      if (XVEC (tmp, 0) == 0)
	win = 1;
      else if (XVECLEN (tmp, 0) > 3)
	win = 0;
      else
	for (i = 0; i < XVECLEN (tmp, 0); i++)
	  {
	    rtx insn = XVECEXP (tmp, 0, i);

	    if (GET_CODE (insn) != INSN
		|| (GET_CODE (PATTERN (insn)) == SET
		    && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
		|| (GET_CODE (PATTERN (insn)) == PARALLEL
		    && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
		    && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
	      {
		win = 0;
		break;
	      }
	  }
    }
  else if (GET_CODE (tmp) == SET
	   && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
	   && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  /* Free any storage we obtained in generating this multiply and restore rtl
     allocation to its normal obstack.  */
  obstack_free (&temp_obstack, storage);
  rtl_obstack = old_rtl_obstack;

  return win;
}
/* Check to see if a loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
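/* For example (illustrative only): a loop such as

	for (i = 0; i < 10; i++)
	  body;

   whose counter is used only for counting can be rewritten as

	for (i = 9; i >= 0; i--)
	  body;

   so that a machine like the m68k can close it with a single
   decrement-and-branch (dbra) instruction.  */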
static int
check_dbra_loop (loop_end, insn_count, loop_start)
     rtx loop_end;
     int insn_count;
     rtx loop_start;
{
  struct iv_class *bl;
  rtx reg;
  rtx jump_label;
  rtx final_value;
  rtx start_value;
  rtx new_add_val;
  rtx comparison;
  rtx before_comparison;
  rtx p;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  comparison = get_condition_for_loop (PREV_INSN (loop_end));
  if (comparison == 0)
    return 0;
  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
	  && bl->biv->dest_reg == XEXP (comparison, 0)
	  && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
				   PREV_INSN (PREV_INSN (loop_end))))
	break;
    }

  if (! bl)
    return 0;
  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */

  if (((GET_CODE (comparison) == GT
	&& GET_CODE (XEXP (comparison, 1)) == CONST_INT
	&& INTVAL (XEXP (comparison, 1)) == -1)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
	 init_val % -dec_value == 0 to ensure that it equals zero on
	 the last iteration */

      if (GET_CODE (bl->initial_value) == CONST_INT
	  && INTVAL (bl->initial_value) > 0
	  && (INTVAL (bl->initial_value)
	      % (-INTVAL (bl->biv->add_val))) == 0)
	{
	  /* register always nonnegative, add REG_NOTE to branch */
	  REG_NOTES (PREV_INSN (loop_end))
	    = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
		       REG_NOTES (PREV_INSN (loop_end)));
	  bl->nonneg = 1;

	  return 1;
	}
      /* If the decrement is 1 and the value was tested as >= 0 before
	 the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
	{
	  if (GET_CODE (p) == CODE_LABEL)
	    break;
	  if (GET_CODE (p) != JUMP_INSN)
	    continue;

	  before_comparison = get_condition_for_loop (p);
	  if (before_comparison
	      && XEXP (before_comparison, 0) == bl->biv->dest_reg
	      && GET_CODE (before_comparison) == LT
	      && XEXP (before_comparison, 1) == const0_rtx
	      && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
	      && INTVAL (bl->biv->add_val) == -1)
	    {
	      REG_NOTES (PREV_INSN (loop_end))
		= gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
			   REG_NOTES (PREV_INSN (loop_end)));
	      bl->nonneg = 1;

	      return 1;
	    }
	}
    }
  else if (num_mem_sets <= 1)
    {
      /* Try to change inc to dec, so can apply above optimization.  */
      /* Can do this if:
	 all registers modified are induction variables or invariant,
	 all memory references have non-overlapping addresses
	 (obviously true if only one write)
	 allow 2 insns for the compare/jump at the end of the loop.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
	 which is reversible.  */
      int reversible_mem_store = 1;

      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
	  num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
      if (bl->giv_count == 0
	  && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
	{
	  rtx bivreg = regno_reg_rtx[bl->regno];

	  /* If there are no givs for this biv, and the only exit is the
	     fall through at the end of the loop, then
	     see if perhaps there are no uses except to count.  */
	  no_use_except_counting = 1;
	  for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	    if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
	      {
		rtx set = single_set (p);

		if (set && GET_CODE (SET_DEST (set)) == REG
		    && REGNO (SET_DEST (set)) == bl->regno)
		  /* An insn that sets the biv is okay.  */
		  ;
		else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
			 || p == prev_nonnote_insn (loop_end))
		  /* Don't bother about the end test.  */
		  ;
		else if (reg_mentioned_p (bivreg, PATTERN (p)))
		  /* Any other use of the biv is no good.  */
		  {
		    no_use_except_counting = 0;
		    break;
		  }
	      }
	}

      /* If the loop has a single store, and the destination address is
	 invariant, then we can't reverse the loop, because this address
	 might then have the wrong value at loop exit.
	 This would work if the source was invariant also, however, in that
	 case, the insn should have been moved out of the loop.  */

      if (num_mem_sets == 1)
	reversible_mem_store
	  = (! unknown_address_altered
	     && ! invariant_p (XEXP (loop_store_mems[0], 0)));
      /* This code only acts for innermost loops.  Also it simplifies
	 the memory address check by only reversing loops with
	 zero or one memory access.
	 Two memory accesses could involve parts of the same array,
	 and that can't be reversed.  */

      if (num_nonfixed_reads <= 1
	  && !loop_has_call
	  && !loop_has_volatile
	  && reversible_mem_store
	  && (no_use_except_counting
	      || (bl->giv_count + bl->biv_count + num_mem_sets
		  + num_movables + 2 == insn_count)))
	{
	  rtx tem;

	  /* Loop can be reversed.  */
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Can reverse loop\n");
	  /* Now check other conditions:
	     initial_value must be zero,
	     final_value % add_val == 0, so that when reversed, the
	     biv will be zero on the last iteration.

	     This test can probably be improved since +/- 1 in the constant
	     can be obtained by changing LT to LE and vice versa; this is
	     confusing.  */

	  if (comparison && bl->initial_value == const0_rtx
	      && GET_CODE (XEXP (comparison, 1)) == CONST_INT
	      /* LE gets turned into LT */
	      && GET_CODE (comparison) == LT
	      && (INTVAL (XEXP (comparison, 1))
		  % INTVAL (bl->biv->add_val)) == 0)
	    {
	      /* Register will always be nonnegative, with value
		 0 on last iteration if loop reversed */

	      /* Save some info needed to produce the new insns.  */
	      reg = bl->biv->dest_reg;
	      jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
	      new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));

	      final_value = XEXP (comparison, 1);
	      start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
				     - INTVAL (bl->biv->add_val));

	      /* Initialize biv to start_value before loop start.
		 The old initializing insn will be deleted as a
		 dead store by flow.c.  */
	      emit_insn_before (gen_move_insn (reg, start_value), loop_start);
	      /* Add insn to decrement register, and delete insn
		 that incremented the register.  */
	      p = emit_insn_before (gen_add2_insn (reg, new_add_val),
				    bl->biv->insn);
	      delete_insn (bl->biv->insn);

	      /* Update biv info to reflect its new status.  */
	      bl->biv->insn = p;
	      bl->initial_value = start_value;
	      bl->biv->add_val = new_add_val;

	      /* Inc LABEL_NUSES so that delete_insn will
		 not delete the label.  */
	      LABEL_NUSES (XEXP (jump_label, 0)) ++;

	      /* Emit an insn after the end of the loop to set the biv's
		 proper exit value if it is used anywhere outside the loop.  */
	      if ((regno_last_uid[bl->regno]
		   != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
		  || ! bl->init_insn
		  || regno_first_uid[bl->regno] != INSN_UID (bl->init_insn))
		emit_insn_after (gen_move_insn (reg, final_value),
				 loop_end);

	      /* Delete compare/branch at end of loop.  */
	      delete_insn (PREV_INSN (loop_end));
	      delete_insn (PREV_INSN (loop_end));
	      /* Add new compare/branch insn at end of loop.  */
	      start_sequence ();
	      emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
			     GET_MODE (reg), 0, 0);
	      emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
	      tem = gen_sequence ();
	      end_sequence ();
	      emit_jump_insn_before (tem, loop_end);

	      for (tem = PREV_INSN (loop_end);
		   tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
		;
	      if (tem)
		JUMP_LABEL (tem) = XEXP (jump_label, 0);

	      /* Increment of LABEL_NUSES done above.  */

	      /* Register is now always nonnegative,
		 so add REG_NONNEG note to the branch.  */
	      REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
					 REG_NOTES (tem));
	      bl->nonneg = 1;

	      /* Mark that this biv has been reversed.  Each giv which depends
		 on this biv, and which is also live past the end of the loop
		 will have to be fixed up.  */

	      bl->reversed = 1;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "Reversed loop and added reg_nonneg\n");

	      return 1;
	    }
	}
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */
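/* For example (illustrative only): if the only remaining use of biv `b'
   is an exit test such as `b < n', and the giv `g = 4*b' has been
   reduced, the test can be rewritten as `g < 4*n' and the biv
   eliminated entirely.  */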
static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;

      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
	  && reg_mentioned_p (reg, PATTERN (p))
	  && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Cannot eliminate biv %d: biv used in insn %d.\n",
		     bl->regno, INSN_UID (p));
	  break;
	}
    }

  if (p == end)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
		 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */
static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, new, tem;
  int arg_operand;
  register char *fmt;
  register int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
	 we can't eliminate it.  */
      return x != reg;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
	return 1;

      /* If this is an insn that defines a giv, it is also ok because
	 it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
	  return 1;

      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
	{
	  /* Can replace with any giv that was reduced and
	     that has (MULT_VAL != 0) and (ADD_VAL == 0).
	     Require a constant for MULT_VAL, so we know it's nonzero.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& v->add_val == const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode)
	      {
		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
				 const0_rtx, v->new_reg);
		else
		  new = v->new_reg;

		/* We can probably test that giv's reduced reg.  */
		if (validate_change (insn, &SET_SRC (x), new, 0))
		  return 1;
	      }
	  /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
	     replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
	     Require a constant for MULT_VAL, so we know it's nonzero.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode)
	      {
		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
				 v->new_reg);
		else
		  new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
				 copy_rtx (v->add_val));

		/* Replace biv with the giv's reduced register.  */
		update_reg_last_use (v->add_val, insn);
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;

		/* Insn doesn't support that constant or invariant.  Copy it
		   into a register (it will be a loop invariant.)  */
		tem = gen_reg_rtx (GET_MODE (v->new_reg));

		emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
				  where);

		if (validate_change (insn, &SET_SRC (PATTERN (insn)),
				     gen_rtx (COMPARE, VOIDmode,
					      v->new_reg, tem), 0))
		  return 1;
	      }
	}
      break;

    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
	arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
	arg = XEXP (x, 0), arg_operand = 0;
      else
	break;

      if (CONSTANT_P (arg))
	{
	  /* First try to replace with any giv that has constant positive
	     mult_val and constant add_val.  We might be able to support
	     negative mult_val, but it seems complex to do it in general.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
		&& CONSTANT_P (v->add_val)
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode)
	      {
		if (! eliminate_p)
		  return 1;

		/* Replace biv with the giv's reduced reg.  */
		XEXP (x, 1-arg_operand) = v->new_reg;

		/* If all constants are actually constant integers and
		   the derived constant can be directly placed in the COMPARE,
		   do so.  */
		if (GET_CODE (arg) == CONST_INT
		    && GET_CODE (v->mult_val) == CONST_INT
		    && GET_CODE (v->add_val) == CONST_INT
		    && validate_change (insn, &XEXP (x, arg_operand),
					GEN_INT (INTVAL (arg)
						 * INTVAL (v->mult_val)
						 + INTVAL (v->add_val)), 0))
		  return 1;

		/* Otherwise, load it into a register.  */
		tem = gen_reg_rtx (mode);
		emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
		if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
		  return 1;

		/* If that failed, put back the change we made above.  */
		XEXP (x, 1-arg_operand) = reg;
	      }
          /* Look for a giv with positive constant mult_val and nonconstant
             add_val.  Insert insns to calculate the new compare value.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                if (! eliminate_p)
                  return 1;

                tem = gen_reg_rtx (mode);

                /* Replace biv with giv's reduced register.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand),
                                 v->new_reg, 1);

                /* Compute value to compare against.  */
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                /* Use it in this insn.  */
                validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                if (apply_change_group ())
                  return 1;
              }
        }
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
        {
          if (invariant_p (arg) == 1)
            {
              /* Look for a giv with constant positive mult_val and
                 nonconstant add_val.  Insert insns to compute the new
                 compare value.  */

              for (v = bl->giv; v; v = v->next_iv)
                if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                    && ! v->ignore && ! v->maybe_dead && v->always_computable
                    && v->mode == mode)
                  {
                    if (! eliminate_p)
                      return 1;

                    tem = gen_reg_rtx (mode);

                    /* Replace biv with giv's reduced register.  */
                    validate_change (insn, &XEXP (x, 1 - arg_operand),
                                     v->new_reg, 1);

                    /* Compute value to compare against.  */
                    emit_iv_add_mult (arg, v->mult_val, v->add_val,
                                      tem, where);
                    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                    if (apply_change_group ())
                      return 1;
                  }
            }
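          /* Note on the idiom above (added commentary): a final argument of
             1 to validate_change queues the change in a group rather than
             applying it immediately; apply_change_group then accepts all
             queued changes only if the modified insn still matches an insn
             pattern, and otherwise cancels every one of them, so the two
             operand replacements succeed or fail atomically.  */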
          /* This code has problems.  Basically, you can't know, when
             deciding whether we will eliminate BL, whether a particular giv
             of ARG will be reduced.  If it isn't going to be reduced, we
             can't eliminate BL.  We can try forcing it to be reduced, but
             that can generate poor code.

             The problem is that the benefit of reducing TV, below, should
             be increased if BL can actually be eliminated, but this means
             we might have to do a topological sort of the order in which
             we try to process bivs.  It doesn't seem worthwhile to do
             this sort of thing now.  */
#if 0
          /* Otherwise the reg compared with had better be a biv.  */
          if (GET_CODE (arg) != REG
              || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
            return 0;

          /* Look for a pair of givs, one for each biv,
             with identical coefficients.  */
          for (v = bl->giv; v; v = v->next_iv)
            {
              struct induction *tv;

              if (v->ignore || v->maybe_dead || v->mode != mode)
                continue;

              for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
                if (! tv->ignore && ! tv->maybe_dead
                    && rtx_equal_p (tv->mult_val, v->mult_val)
                    && rtx_equal_p (tv->add_val, v->add_val)
                    && tv->mode == mode)
                  {
                    if (! eliminate_p)
                      return 1;

                    /* Replace biv with its giv's reduced reg.  */
                    XEXP (x, 1 - arg_operand) = v->new_reg;
                    /* Replace the other operand with the other giv's
                       reduced reg.  */
                    XEXP (x, arg_operand) = tv->new_reg;
                    return 1;
                  }
            }
#endif
        }
      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
         biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
          return 1;
      break;
    }
  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'e':
          if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
                                       eliminate_p, where))
            return 0;
          break;

        case 'E':
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
                                         eliminate_p, where))
              return 0;
          break;
        }
    }

  return 1;
}
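/* Purely illustrative sketch (added; not part of GNU CC): the source-level
   effect of strength reduction followed by biv elimination.  The function
   names below are hypothetical.  */
#if 0
void before (int *a, int n)
{
  int i;
  for (i = 0; i < n; i++)      /* biv `i'; `a + i' is a giv */
    a[i] = 0;
}

void after (int *a, int n)
{
  int *p = a, *last = a + n;   /* giv reduced to pointer `p' */
  for (; p < last; p++)        /* exit test rewritten onto the giv */
    *p = 0;                    /* biv `i' is now dead */
}
#endif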
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg, insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (regno_last_uid[REGNO (reg)] == INSN_UID (n))
        return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest, set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid, so we
   can't use it.  */

static void
update_reg_last_use (x, insn)
     rtx x, insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of X.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[regno_last_uid[REGNO (x)]] < uid_luid[INSN_UID (insn)])
    regno_last_uid[REGNO (x)] = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            update_reg_last_use (XEXP (x, i), insn);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              update_reg_last_use (XVECEXP (x, i, j), insn);
        }
    }
}
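/* Aside (added commentary): the else-branch above is the standard idiom for
   walking an arbitrary rtx.  GET_RTX_FORMAT yields one format character per
   operand: 'e' marks a subexpression reached with XEXP, and 'E' a vector of
   subexpressions indexed by XVECEXP up to XVECLEN.  Operands with other
   format letters (integers, strings) cannot contain registers and are
   skipped.  */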
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */
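/* Examples of the canonical form (added for illustration):

	(gt (const_int 4) (reg 65))   =>  (lt (reg 65) (const_int 4))
	(le (reg 65) (const_int 4))   =>  (lt (reg 65) (const_int 5))
	(geu (reg 65) (const_int 1))  =>  (gtu (reg 65) (const_int 0))

   Rule (4) is suppressed at boundary values where <const+1> or <const-1>
   would wrap around; see the switch on CODE below.  */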
rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;
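  /* For reference (added): a standard conditional jump has the shape

	(set (pc) (if_then_else (COND op0 op1)
				(label_ref LABEL)
				(pc)))

     so operand 0 of the IF_THEN_ELSE is the condition and operands 1 and 2
     are the two arms; when the label sits in arm 2, the branch is taken
     when the condition is *false*, hence the reversal just above.  */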
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c does.  */

  while (GET_RTX_CLASS (code) == '<' && op1 == const0_rtx)
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || GET_CODE (prev) != INSN
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (GET_CODE (op0) != REG)
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
          || GET_CODE (prev) != INSN
          || FIND_REG_INC_NOTE (prev, 0)
          || (set = single_set (prev)) == 0)
        break;
      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (SET_DEST (set) == op0)
        {
          enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     ))
                   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     ))
                   && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
            {
              /* We might have reversed a LT to get a GE here.  But this
                 wasn't actually the comparison of data, so we don't flag
                 that we have had to reverse the condition.  */
              did_reverse_condition ^= 1;
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }
      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          if (GET_RTX_CLASS (GET_CODE (x)) == '<')
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reverse_condition (code);
              did_reverse_condition ^= 1;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }
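  /* Rationale for the STORE_FLAG_VALUE tests above (added commentary): a
     store-flag insn deposits STORE_FLAG_VALUE for a true condition and zero
     otherwise.  When the stored value has its sign bit set (for example
     STORE_FLAG_VALUE == -1), testing the result with NE or LT against zero
     is precisely testing the original condition, so the comparison itself
     can be substituted; EQ or GE against zero tests its negation, which is
     why that branch sets reverse_code.  */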
  /* If the constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if (const_val != max_val >> 1)
            code = LT, op1 = GEN_INT (const_val + 1);
          break;

        case GE:
          if (const_val
              != (((HOST_WIDE_INT) 1
                   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
            code = GT, op1 = GEN_INT (const_val - 1);
          break;

        case LEU:
          if (uconst_val != max_val)
            code = LTU, op1 = GEN_INT (uconst_val + 1);
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = GEN_INT (uconst_val - 1);
          break;
        }
    }
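  /* Worked example (added): in QImode, (le (reg) (const_int 127)) is left
     alone, since 127 is max_val >> 1 and const_val + 1 would wrap; an
     in-range constant is rewritten, e.g. (le (reg) (const_int 4)) becomes
     (lt (reg) (const_int 5)) and (geu (reg) (const_int 1)) becomes
     (gtu (reg) (const_int 0)).  */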
  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx (code, VOIDmode, op0, op1);
}
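/* Why reversed floating-point inequalities are rejected above (added note):
   under IEEE arithmetic the negation of (lt a b) is not (ge a b), because
   both are false when either operand is a NaN.  Reversing EQ or NE remains
   safe, since NE is true for NaN operands exactly where EQ is false.  */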
/* Similar to the above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
                  XEXP (comparison, 1), XEXP (comparison, 0));
}
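/* Example (added): if get_condition returns (gt (reg 70) (reg 71)) where
   (reg 70) is loop-invariant and (reg 71) is not, the result here is
   (lt (reg 71) (reg 70)); swap_condition exchanges the sense of the
   operator (GT <-> LT, GE <-> LE, and so on) so the same condition is
   expressed with the invariant operand last.  */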