1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables.
27 Basic induction variables (BIVs) are pseudo registers that are set within
28 a loop only by incrementing or decrementing their value. General induction
29 variables (GIVs) are pseudo registers whose value is a linear function
30 of a basic induction variable. BIVs are recognized by `basic_induction_var';
31 GIVs by `general_induction_var'.
33 Once induction variables are identified, strength reduction is applied to the
34 general induction variables, and induction variable elimination is applied to
35 the basic induction variables.
37 It also finds cases where
38 a register is set within the loop by zero-extending a narrower value
39 and changes these to zero the entire register once before the loop
40 and merely copy the low part within the loop.
42 Most of the complexity is in heuristics to decide when it is worth
43 while to do these things. */
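/* As a rough illustration (not drawn from any particular testcase), consider:

	for (i = 0; i < n; i++)
	  a[i] = 0;

   Here `i' is a BIV: within the loop it is set only by the increment i++.
   The address computation `&a[0] + i * 4' (assuming 4-byte elements) is a
   GIV, since it is a linear function of the BIV.  Strength reduction
   introduces a new pseudo `p' initialized to `&a[0]' before the loop and
   incremented by 4 each iteration, so the body becomes roughly `*p = 0;
   p += 4;', eliminating the multiplication.  If `i' then has no other
   uses, induction variable elimination can rewrite the exit test in terms
   of `p' and delete `i' altogether.  */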
45 #include "config.h"
46 #include "system.h"
47 #include "coretypes.h"
48 #include "tm.h"
49 #include "rtl.h"
50 #include "tm_p.h"
51 #include "function.h"
52 #include "expr.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "insn-config.h"
56 #include "regs.h"
57 #include "recog.h"
58 #include "flags.h"
59 #include "real.h"
60 #include "loop.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
69 /* Not really meaningful values, but at least something. */
70 #ifndef SIMULTANEOUS_PREFETCHES
71 #define SIMULTANEOUS_PREFETCHES 3
72 #endif
73 #ifndef PREFETCH_BLOCK
74 #define PREFETCH_BLOCK 32
75 #endif
76 #ifndef HAVE_prefetch
77 #define HAVE_prefetch 0
78 #define CODE_FOR_prefetch 0
79 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
80 #endif
82 /* Give up the prefetch optimizations once we exceed a given threshold.
83 It is unlikely that we would be able to optimize something in a loop
84 with so many detected prefetches. */
85 #define MAX_PREFETCHES 100
86 /* The number of prefetch blocks that are beneficial to fetch at once before
87 a loop with a known (and low) iteration count. */
88 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
89 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
90 since it is likely that the data are already in the cache. */
91 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
93 /* Parameterize some prefetch heuristics so they can be turned on and off
94 easily for performance testing on new architectures. These can be
95 defined in target-dependent files. */
97 /* Prefetch is worthwhile only when loads/stores are dense. */
98 #ifndef PREFETCH_ONLY_DENSE_MEM
99 #define PREFETCH_ONLY_DENSE_MEM 1
100 #endif
102 /* Define what we mean by "dense" loads and stores; this value divided by 256
103 is the minimum percentage of memory references for which prefetching is worthwhile. */
104 #ifndef PREFETCH_DENSE_MEM
105 #define PREFETCH_DENSE_MEM 220
106 #endif
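/* With the default value of 220 above, this density threshold works out to
   220/256, i.e. roughly 86%.  */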
108 /* Do not prefetch for a loop whose iteration count is known to be low. */
109 #ifndef PREFETCH_NO_LOW_LOOPCNT
110 #define PREFETCH_NO_LOW_LOOPCNT 1
111 #endif
113 /* Define what we mean by a "low" iteration count. */
114 #ifndef PREFETCH_LOW_LOOPCNT
115 #define PREFETCH_LOW_LOOPCNT 32
116 #endif
118 /* Do not prefetch for a loop that contains a function call; such a loop is
119 probably not an internal loop. */
120 #ifndef PREFETCH_NO_CALL
121 #define PREFETCH_NO_CALL 1
122 #endif
124 /* Do not prefetch accesses with an extreme stride. */
125 #ifndef PREFETCH_NO_EXTREME_STRIDE
126 #define PREFETCH_NO_EXTREME_STRIDE 1
127 #endif
129 /* Define what we mean by an "extreme" stride. */
130 #ifndef PREFETCH_EXTREME_STRIDE
131 #define PREFETCH_EXTREME_STRIDE 4096
132 #endif
134 /* Define a limit to how far apart indices can be and still be merged
135 into a single prefetch. */
136 #ifndef PREFETCH_EXTREME_DIFFERENCE
137 #define PREFETCH_EXTREME_DIFFERENCE 4096
138 #endif
140 /* Issue prefetch instructions before the loop to fetch data to be used
141 in the first few loop iterations. */
142 #ifndef PREFETCH_BEFORE_LOOP
143 #define PREFETCH_BEFORE_LOOP 1
144 #endif
146 /* Do not handle reversed order prefetches (negative stride). */
147 #ifndef PREFETCH_NO_REVERSE_ORDER
148 #define PREFETCH_NO_REVERSE_ORDER 1
149 #endif
151 /* Prefetch even if the GIV is in conditional code. */
152 #ifndef PREFETCH_CONDITIONAL
153 #define PREFETCH_CONDITIONAL 1
154 #endif
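/* Any of the defaults above can be overridden in a port's target-dependent
   files.  As a purely hypothetical example, a target header might contain:

	#define SIMULTANEOUS_PREFETCHES 8
	#define PREFETCH_BLOCK 64
	#define PREFETCH_LOW_LOOPCNT 16
	#define PREFETCH_CONDITIONAL 0

   (illustrative values only; real ports derive them from the machine's
   cache line size and prefetch queue depth).  */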
156 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
157 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
159 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
160 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
161 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
163 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
164 ((REGNO) < FIRST_PSEUDO_REGISTER \
165 ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
168 /* Vector mapping INSN_UIDs to luids.
169 The luids are like uids but always increase monotonically.
170 We use them to see whether a jump comes from outside a given loop. */
172 int *uid_luid;
174 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
175 number the insn is contained in. */
177 struct loop **uid_loop;
179 /* 1 + largest uid of any insn. */
181 int max_uid_for_loop;
183 /* Number of loops detected in current function. Used as index to the
184 next few tables. */
186 static int max_loop_num;
188 /* Bound on pseudo register number before loop optimization.
189 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
190 unsigned int max_reg_before_loop;
192 /* The value to pass to the next call of reg_scan_update. */
193 static int loop_max_reg;
195 /* During the analysis of a loop, a chain of `struct movable's
196 is made to record all the movable insns found.
197 Then the entire chain can be scanned to decide which to move. */
199 struct movable
201 rtx insn; /* A movable insn */
202 rtx set_src; /* The expression this reg is set from. */
203 rtx set_dest; /* The destination of this SET. */
204 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
205 of any registers used within the LIBCALL. */
206 int consec; /* Number of consecutive following insns
207 that must be moved with this one. */
208 unsigned int regno; /* The register it sets */
209 short lifetime; /* lifetime of that register;
210 may be adjusted when matching movables
211 that load the same value are found. */
212 short savings; /* Number of insns we can move for this reg,
213 including other movables that force this
214 or match this one. */
215 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
216 a low part that we should avoid changing when
217 clearing the rest of the reg. */
218 unsigned int cond : 1; /* 1 if only conditionally movable */
219 unsigned int force : 1; /* 1 means MUST move this insn */
220 unsigned int global : 1; /* 1 means reg is live outside this loop */
221 /* If PARTIAL is 1, GLOBAL means something different:
222 that the reg is live outside the range from where it is set
223 to the following label. */
224 unsigned int done : 1; /* 1 inhibits further processing of this */
226 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
227 In particular, moving it does not make it
228 invariant. */
229 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
230 load SRC, rather than copying INSN. */
231 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
232 first insn of a consecutive sets group. */
233 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
234 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
235 the original insn with a copy from that
236 pseudo, rather than deleting it. */
237 struct movable *match; /* First entry for same value */
238 struct movable *forces; /* An insn that must be moved if this is */
239 struct movable *next;
243 FILE *loop_dump_stream;
245 /* Forward declarations. */
247 static void invalidate_loops_containing_label (rtx);
248 static void find_and_verify_loops (rtx, struct loops *);
249 static void mark_loop_jump (rtx, struct loop *);
250 static void prescan_loop (struct loop *);
251 static int reg_in_basic_block_p (rtx, rtx);
252 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
253 static int labels_in_range_p (rtx, int);
254 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
255 static void note_addr_stored (rtx, rtx, void *);
256 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
257 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
258 static void scan_loop (struct loop*, int);
259 #if 0
260 static void replace_call_address (rtx, rtx, rtx);
261 #endif
262 static rtx skip_consec_insns (rtx, int);
263 static int libcall_benefit (rtx);
264 static void ignore_some_movables (struct loop_movables *);
265 static void force_movables (struct loop_movables *);
266 static void combine_movables (struct loop_movables *, struct loop_regs *);
267 static int num_unmoved_movables (const struct loop *);
268 static int regs_match_p (rtx, rtx, struct loop_movables *);
269 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
270 struct loop_regs *);
271 static void add_label_notes (rtx, rtx);
272 static void move_movables (struct loop *loop, struct loop_movables *, int,
273 int);
274 static void loop_movables_add (struct loop_movables *, struct movable *);
275 static void loop_movables_free (struct loop_movables *);
276 static int count_nonfixed_reads (const struct loop *, rtx);
277 static void loop_bivs_find (struct loop *);
278 static void loop_bivs_init_find (struct loop *);
279 static void loop_bivs_check (struct loop *);
280 static void loop_givs_find (struct loop *);
281 static void loop_givs_check (struct loop *);
282 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
283 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
284 struct induction *, rtx);
285 static void loop_givs_dead_check (struct loop *, struct iv_class *);
286 static void loop_givs_reduce (struct loop *, struct iv_class *);
287 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
288 static void loop_ivs_free (struct loop *);
289 static void strength_reduce (struct loop *, int);
290 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
291 static int valid_initial_value_p (rtx, rtx, int, rtx);
292 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
293 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
294 rtx, rtx *, int, int);
295 static void check_final_value (const struct loop *, struct induction *);
296 static void loop_ivs_dump (const struct loop *, FILE *, int);
297 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
298 static void loop_biv_dump (const struct induction *, FILE *, int);
299 static void loop_giv_dump (const struct induction *, FILE *, int);
300 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
301 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
302 rtx *);
303 static void update_giv_derive (const struct loop *, rtx);
304 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
305 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
306 rtx, rtx, rtx *, rtx *, rtx **);
307 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
308 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
309 rtx *, rtx *, int, int *, enum machine_mode);
310 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
311 rtx *, rtx *, rtx *);
312 static int check_dbra_loop (struct loop *, int);
313 static rtx express_from_1 (rtx, rtx, rtx);
314 static rtx combine_givs_p (struct induction *, struct induction *);
315 static int cmp_combine_givs_stats (const void *, const void *);
316 static void combine_givs (struct loop_regs *, struct iv_class *);
317 static int product_cheap_p (rtx, rtx);
318 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
319 int, int);
320 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
321 struct iv_class *, int, basic_block, rtx);
322 static int last_use_this_basic_block (rtx, rtx);
323 static void record_initial (rtx, rtx, void *);
324 static void update_reg_last_use (rtx, rtx);
325 static rtx next_insn_in_loop (const struct loop *, rtx);
326 static void loop_regs_scan (const struct loop *, int);
327 static int count_insns_in_loop (const struct loop *);
328 static int find_mem_in_note_1 (rtx *, void *);
329 static rtx find_mem_in_note (rtx);
330 static void load_mems (const struct loop *);
331 static int insert_loop_mem (rtx *, void *);
332 static int replace_loop_mem (rtx *, void *);
333 static void replace_loop_mems (rtx, rtx, rtx, int);
334 static int replace_loop_reg (rtx *, void *);
335 static void replace_loop_regs (rtx insn, rtx, rtx);
336 static void note_reg_stored (rtx, rtx, void *);
337 static void try_copy_prop (const struct loop *, rtx, unsigned int);
338 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
339 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
340 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
341 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
342 static void loop_regs_update (const struct loop *, rtx);
343 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
345 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
346 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
347 rtx, rtx);
348 static rtx loop_call_insn_hoist (const struct loop *, rtx);
349 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
351 static void loop_dump_aux (const struct loop *, FILE *, int);
352 static void loop_delete_insns (rtx, rtx);
353 static HOST_WIDE_INT remove_constant_addition (rtx *);
354 static rtx gen_load_of_final_value (rtx, rtx);
355 void debug_ivs (const struct loop *);
356 void debug_iv_class (const struct iv_class *);
357 void debug_biv (const struct induction *);
358 void debug_giv (const struct induction *);
359 void debug_loop (const struct loop *);
360 void debug_loops (const struct loops *);
362 typedef struct loop_replace_args
364 rtx match;
365 rtx replacement;
366 rtx insn;
367 } loop_replace_args;
369 /* Nonzero iff INSN is between START and END, inclusive. */
370 #define INSN_IN_RANGE_P(INSN, START, END) \
371 (INSN_UID (INSN) < max_uid_for_loop \
372 && INSN_LUID (INSN) >= INSN_LUID (START) \
373 && INSN_LUID (INSN) <= INSN_LUID (END))
375 /* Indirect_jump_in_function is computed once per function. */
376 static int indirect_jump_in_function;
377 static int indirect_jump_in_function_p (rtx);
379 static int compute_luids (rtx, rtx, int);
381 static int biv_elimination_giv_has_0_offset (struct induction *,
382 struct induction *, rtx);
384 /* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
385 copy the value of the strength-reduced giv to its original register. */
386 static int copy_cost;
388 /* Cost of using a register, to normalize the benefits of a giv. */
389 static int reg_address_cost;
391 void
392 init_loop (void)
394 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
396 reg_address_cost = address_cost (reg, SImode);
398 copy_cost = COSTS_N_INSNS (1);
401 /* Compute the mapping from uids to luids.
402 LUIDs are numbers assigned to insns, like uids,
403 except that luids increase monotonically through the code.
404 Start at insn START and stop just before END. Assign LUIDs
405 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
406 static int
407 compute_luids (rtx start, rtx end, int prev_luid)
409 int i;
410 rtx insn;
412 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
414 if (INSN_UID (insn) >= max_uid_for_loop)
415 continue;
416 /* Don't assign luids to line-number NOTEs, so that the distance in
417 luids between two insns is not affected by -g. */
418 if (GET_CODE (insn) != NOTE
419 || NOTE_LINE_NUMBER (insn) <= 0)
420 uid_luid[INSN_UID (insn)] = ++i;
421 else
422 /* Give a line number note the same luid as preceding insn. */
423 uid_luid[INSN_UID (insn)] = i;
425 return i + 1;
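/* For illustration: if the insns between START and END carry uids 40, 7
   and 23 (uids need not be ordered once insns have been moved around),
   compute_luids assigns them luids PREV_LUID + 1, PREV_LUID + 2 and
   PREV_LUID + 3 in stream order, while a line-number NOTE in between is
   simply given the luid of the preceding insn.  */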
428 /* Entry point of this file. Perform loop optimization
429 on the current function. F is the first insn of the function
430 and DUMPFILE is a stream for output of a trace of actions taken
431 (or 0 if none should be output). */
433 void
434 loop_optimize (rtx f, FILE *dumpfile, int flags)
436 rtx insn;
437 int i;
438 struct loops loops_data;
439 struct loops *loops = &loops_data;
440 struct loop_info *loops_info;
442 loop_dump_stream = dumpfile;
444 init_recog_no_volatile ();
446 max_reg_before_loop = max_reg_num ();
447 loop_max_reg = max_reg_before_loop;
449 regs_may_share = 0;
451 /* Count the number of loops. */
453 max_loop_num = 0;
454 for (insn = f; insn; insn = NEXT_INSN (insn))
456 if (GET_CODE (insn) == NOTE
457 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
458 max_loop_num++;
461 /* Don't waste time if no loops. */
462 if (max_loop_num == 0)
463 return;
465 loops->num = max_loop_num;
467 /* Get size to use for tables indexed by uids.
468 Leave some space for labels allocated by find_and_verify_loops. */
469 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
471 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
472 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
474 /* Allocate storage for array of loops. */
475 loops->array = xcalloc (loops->num, sizeof (struct loop));
477 /* Find and process each loop.
478 First, find them, and record them in order of their beginnings. */
479 find_and_verify_loops (f, loops);
481 /* Allocate and initialize auxiliary loop information. */
482 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
483 for (i = 0; i < (int) loops->num; i++)
484 loops->array[i].aux = loops_info + i;
486 /* Now find all register lifetimes. This must be done after
487 find_and_verify_loops, because it might reorder the insns in the
488 function. */
489 reg_scan (f, max_reg_before_loop, 1);
491 /* This must occur after reg_scan so that registers created by gcse
492 will have entries in the register tables.
494 We could have added a call to reg_scan after gcse_main in toplev.c,
495 but moving this call to init_alias_analysis is more efficient. */
496 init_alias_analysis ();
498 /* See if we went too far. Note that get_max_uid already returns
499 one more than the maximum uid of all insns. */
500 if (get_max_uid () > max_uid_for_loop)
501 abort ();
502 /* Now reset it to the actual size we need. See above. */
503 max_uid_for_loop = get_max_uid ();
505 /* find_and_verify_loops has already called compute_luids, but it
506 might have rearranged code afterwards, so we need to recompute
507 the luids now. */
508 compute_luids (f, NULL_RTX, 0);
510 /* Don't leave gaps in uid_luid for insns that have been
511 deleted. It is possible that the first or last insn
512 using some register has been deleted by cross-jumping.
513 Make sure that uid_luid for that former insn's uid
514 points to the general area where that insn used to be. */
515 for (i = 0; i < max_uid_for_loop; i++)
517 uid_luid[0] = uid_luid[i];
518 if (uid_luid[0] != 0)
519 break;
521 for (i = 0; i < max_uid_for_loop; i++)
522 if (uid_luid[i] == 0)
523 uid_luid[i] = uid_luid[i - 1];
525 /* Determine if the function has an indirect jump. On some systems
526 this prevents low overhead loop instructions from being used. */
527 indirect_jump_in_function = indirect_jump_in_function_p (f);
529 /* Now scan the loops, last ones first, since this means inner ones are done
530 before outer ones. */
531 for (i = max_loop_num - 1; i >= 0; i--)
533 struct loop *loop = &loops->array[i];
535 if (! loop->invalid && loop->end)
536 scan_loop (loop, flags);
539 end_alias_analysis ();
541 /* Clean up. */
542 free (uid_luid);
543 free (uid_loop);
544 free (loops_info);
545 free (loops->array);
548 /* Returns the next insn, in execution order, after INSN. START and
549 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
550 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
551 insn-stream; it is used with loops that are entered near the
552 bottom. */
554 static rtx
555 next_insn_in_loop (const struct loop *loop, rtx insn)
557 insn = NEXT_INSN (insn);
559 if (insn == loop->end)
561 if (loop->top)
562 /* Go to the top of the loop, and continue there. */
563 insn = loop->top;
564 else
565 /* We're done. */
566 insn = NULL_RTX;
569 if (insn == loop->scan_start)
570 /* We're done. */
571 insn = NULL_RTX;
573 return insn;
576 /* Optimize one loop described by LOOP. */
578 /* ??? Could also move memory writes out of loops if the destination address
579 is invariant, the source is invariant, the memory write is not volatile,
580 and if we can prove that no read inside the loop can read this address
581 before the write occurs. If there is a read of this address after the
582 write, then we can also mark the memory read as invariant. */
584 static void
585 scan_loop (struct loop *loop, int flags)
587 struct loop_info *loop_info = LOOP_INFO (loop);
588 struct loop_regs *regs = LOOP_REGS (loop);
589 int i;
590 rtx loop_start = loop->start;
591 rtx loop_end = loop->end;
592 rtx p;
593 /* 1 if we are scanning insns that could be executed zero times. */
594 int maybe_never = 0;
595 /* 1 if we are scanning insns that might never be executed
596 due to a subroutine call which might exit before they are reached. */
597 int call_passed = 0;
598 /* Number of insns in the loop. */
599 int insn_count;
600 int tem;
601 rtx temp, update_start, update_end;
602 /* The SET from an insn, if it is the only SET in the insn. */
603 rtx set, set1;
604 /* Chain describing insns movable in current loop. */
605 struct loop_movables *movables = LOOP_MOVABLES (loop);
606 /* Ratio of extra register life span we can justify
607 for saving an instruction. More if loop doesn't call subroutines
608 since in that case saving an insn makes more difference
609 and more registers are available. */
610 int threshold;
611 /* Nonzero if we are scanning instructions in a sub-loop. */
612 int loop_depth = 0;
613 int in_libcall;
615 loop->top = 0;
617 movables->head = 0;
618 movables->last = 0;
620 /* Determine whether this loop starts with a jump down to a test at
621 the end. This will occur for a small number of loops with a test
622 that is too complex to duplicate in front of the loop.
624 We search for the first insn or label in the loop, skipping NOTEs.
625 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
626 (because we might have a loop executed only once that contains a
627 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
628 (in case we have a degenerate loop).
630 Note that if we mistakenly think that a loop is entered at the top
631 when, in fact, it is entered at the exit test, the only effect will be
632 slightly poorer optimization. Making the opposite error can generate
633 incorrect code. Since very few loops now start with a jump to the
634 exit test, the code here to detect that case is very conservative. */
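/* For illustration, a loop that is entered at its exit test typically
   looks like this (schematically):

	NOTE_INSN_LOOP_BEG
	  jump L2			<- unconditional entry jump to the test
	L1:	...loop body...
	L2:	...exit test...
	  conditional jump back to L1
	NOTE_INSN_LOOP_END

   In that case the code further down sets LOOP->SCAN_START to L2 (the
   target of the entry jump) and LOOP->TOP to L1.  */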
636 for (p = NEXT_INSN (loop_start);
637 p != loop_end
638 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
639 && (GET_CODE (p) != NOTE
640 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
641 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
642 p = NEXT_INSN (p))
645 loop->scan_start = p;
647 /* If loop end is the end of the current function, then emit a
648 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
649 note insn. This is the position we use when sinking insns out of
650 the loop. */
651 if (NEXT_INSN (loop->end) != 0)
652 loop->sink = NEXT_INSN (loop->end);
653 else
654 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
656 /* Set up variables describing this loop. */
657 prescan_loop (loop);
658 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
660 /* If loop has a jump before the first label,
661 the true entry is the target of that jump.
662 Start scan from there.
663 But record in LOOP->TOP the place where the end-test jumps
664 back to so we can scan that after the end of the loop. */
665 if (GET_CODE (p) == JUMP_INSN
666 /* Loop entry must be unconditional jump (and not a RETURN) */
667 && any_uncondjump_p (p)
668 && JUMP_LABEL (p) != 0
669 /* Check to see whether the jump actually
670 jumps out of the loop (meaning it's no loop).
671 This case can happen for things like
672 do {..} while (0). If this label was generated previously
673 by loop, we can't tell anything about it and have to reject
674 the loop. */
675 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
677 loop->top = next_label (loop->scan_start);
678 loop->scan_start = JUMP_LABEL (p);
681 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
682 as required by loop_reg_used_before_p. So skip such loops. (This
683 test may never be true, but it's best to play it safe.)
685 Also, skip loops where we do not start scanning at a label. This
686 test also rejects loops starting with a JUMP_INSN that failed the
687 test above. */
689 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
690 || GET_CODE (loop->scan_start) != CODE_LABEL)
692 if (loop_dump_stream)
693 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
694 INSN_UID (loop_start), INSN_UID (loop_end));
695 return;
698 /* Allocate extra space for REGs that might be created by load_mems.
699 We allocate a little extra slop as well, in the hopes that we
700 won't have to reallocate the regs array. */
701 loop_regs_scan (loop, loop_info->mems_idx + 16);
702 insn_count = count_insns_in_loop (loop);
704 if (loop_dump_stream)
706 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
707 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
708 if (loop->cont)
709 fprintf (loop_dump_stream, "Continue at insn %d.\n",
710 INSN_UID (loop->cont));
713 /* Scan through the loop finding insns that are safe to move.
714 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
715 this reg will be considered invariant for subsequent insns.
716 We consider whether subsequent insns use the reg
717 in deciding whether it is worth actually moving.
719 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
720 and therefore it is possible that the insns we are scanning
721 would never be executed. At such times, we must make sure
722 that it is safe to execute the insn once instead of zero times.
723 When MAYBE_NEVER is 0, all insns will be executed at least once
724 so that is not a problem. */
726 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
727 p != NULL_RTX;
728 p = next_insn_in_loop (loop, p))
730 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
731 in_libcall--;
732 if (GET_CODE (p) == INSN)
734 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
735 if (temp)
736 in_libcall++;
737 if (! in_libcall
738 && (set = single_set (p))
739 && GET_CODE (SET_DEST (set)) == REG
740 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
741 && SET_DEST (set) != pic_offset_table_rtx
742 #endif
743 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
745 int tem1 = 0;
746 int tem2 = 0;
747 int move_insn = 0;
748 int insert_temp = 0;
749 rtx src = SET_SRC (set);
750 rtx dependencies = 0;
752 /* Figure out what to use as a source of this insn. If a
753 REG_EQUIV note is given or if a REG_EQUAL note with a
754 constant operand is specified, use it as the source and
755 mark that we should move this insn by calling
756 emit_move_insn rather than duplicating the insn.
758 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
759 note is present. */
760 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
761 if (temp)
762 src = XEXP (temp, 0), move_insn = 1;
763 else
765 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
766 if (temp && CONSTANT_P (XEXP (temp, 0)))
767 src = XEXP (temp, 0), move_insn = 1;
768 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
770 src = XEXP (temp, 0);
771 /* A libcall block can use regs that don't appear in
772 the equivalent expression. To move the libcall,
773 we must move those regs too. */
774 dependencies = libcall_other_reg (p, src);
778 /* For parallels, add any possible uses to the dependencies, as
779 we can't move the insn without resolving them first. */
780 if (GET_CODE (PATTERN (p)) == PARALLEL)
782 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
784 rtx x = XVECEXP (PATTERN (p), 0, i);
785 if (GET_CODE (x) == USE)
786 dependencies
787 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
788 dependencies);
792 if (/* The register is used in basic blocks other
793 than the one where it is set (meaning that
794 something after this point in the loop might
795 depend on its value before the set). */
796 ! reg_in_basic_block_p (p, SET_DEST (set))
797 /* And the set is not guaranteed to be executed once
798 the loop starts, or the value before the set is
799 needed before the set occurs...
801 ??? Note we have quadratic behavior here, mitigated
802 by the fact that the previous test will often fail for
803 large loops. Rather than re-scanning the entire loop
804 each time for register usage, we should build tables
805 of the register usage and use them here instead. */
806 && (maybe_never
807 || loop_reg_used_before_p (loop, set, p)))
808 /* It is unsafe to move the set. However, it may be OK to
809 move the source into a new pseudo, and substitute a
810 reg-to-reg copy for the original insn.
812 This code used to consider it OK to move a set of a variable
813 which was not created by the user and not used in an exit
814 test.
815 That behavior is incorrect and was removed. */
816 insert_temp = 1;
818 /* Don't try to optimize a MODE_CC set with a constant
819 source. It probably will be combined with a conditional
820 jump. */
821 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
822 && CONSTANT_P (src))
824 /* Don't try to optimize a register that was made
825 by loop-optimization for an inner loop.
826 We don't know its life-span, so we can't compute
827 the benefit. */
828 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
830 /* Don't move the source and add a reg-to-reg copy:
831 - with -Os (this certainly increases size),
832 - if the mode doesn't support copy operations (obviously),
833 - if the source is already a reg (the motion will gain nothing),
834 - if the source is a legitimate constant (likewise). */
835 else if (insert_temp
836 && (optimize_size
837 || ! can_copy_p (GET_MODE (SET_SRC (set)))
838 || GET_CODE (SET_SRC (set)) == REG
839 || (CONSTANT_P (SET_SRC (set))
840 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
842 else if ((tem = loop_invariant_p (loop, src))
843 && (dependencies == 0
844 || (tem2
845 = loop_invariant_p (loop, dependencies)) != 0)
846 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
847 || (tem1
848 = consec_sets_invariant_p
849 (loop, SET_DEST (set),
850 regs->array[REGNO (SET_DEST (set))].set_in_loop,
851 p)))
852 /* If the insn can cause a trap (such as divide by zero),
853 can't move it unless it's guaranteed to be executed
854 once loop is entered. Even a function call might
855 prevent the trap insn from being reached
856 (since it might exit!) */
857 && ! ((maybe_never || call_passed)
858 && may_trap_p (src)))
860 struct movable *m;
861 int regno = REGNO (SET_DEST (set));
863 /* A potential lossage case is where two insns
864 can be combined as long as they are both in the loop, but
865 we move one of them outside the loop. For large loops,
866 this can lose. The most common case of this is the address
867 of a function being called.
869 Therefore, if this register is marked as being used
870 exactly once and we are in a loop with calls
871 (a "large loop"), see if we can replace the usage of
872 this register with the source of this SET. If we can,
873 delete this insn.
875 Don't do this if P has a REG_RETVAL note or if we have
876 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
878 if (loop_info->has_call
879 && regs->array[regno].single_usage != 0
880 && regs->array[regno].single_usage != const0_rtx
881 && REGNO_FIRST_UID (regno) == INSN_UID (p)
882 && (REGNO_LAST_UID (regno)
883 == INSN_UID (regs->array[regno].single_usage))
884 && regs->array[regno].set_in_loop == 1
885 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
886 && ! side_effects_p (SET_SRC (set))
887 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
888 && (! SMALL_REGISTER_CLASSES
889 || (! (GET_CODE (SET_SRC (set)) == REG
890 && (REGNO (SET_SRC (set))
891 < FIRST_PSEUDO_REGISTER))))
892 /* This test is not redundant; SET_SRC (set) might be
893 a call-clobbered register and the life of REGNO
894 might span a call. */
895 && ! modified_between_p (SET_SRC (set), p,
896 regs->array[regno].single_usage)
897 && no_labels_between_p (p,
898 regs->array[regno].single_usage)
899 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
900 regs->array[regno].single_usage))
902 /* Replace any usage in a REG_EQUAL note. Must copy
903 the new source, so that we don't get rtx sharing
904 between the SET_SOURCE and REG_NOTES of insn p. */
905 REG_NOTES (regs->array[regno].single_usage)
906 = (replace_rtx
907 (REG_NOTES (regs->array[regno].single_usage),
908 SET_DEST (set), copy_rtx (SET_SRC (set))));
910 delete_insn (p);
911 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
912 i++)
913 regs->array[regno+i].set_in_loop = 0;
914 continue;
917 m = xmalloc (sizeof (struct movable));
918 m->next = 0;
919 m->insn = p;
920 m->set_src = src;
921 m->dependencies = dependencies;
922 m->set_dest = SET_DEST (set);
923 m->force = 0;
924 m->consec
925 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
926 m->done = 0;
927 m->forces = 0;
928 m->partial = 0;
929 m->move_insn = move_insn;
930 m->move_insn_first = 0;
931 m->insert_temp = insert_temp;
932 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
933 m->savemode = VOIDmode;
934 m->regno = regno;
935 /* Set M->cond if either loop_invariant_p
936 or consec_sets_invariant_p returned 2
937 (only conditionally invariant). */
938 m->cond = ((tem | tem1 | tem2) > 1);
939 m->global = LOOP_REG_GLOBAL_P (loop, regno);
940 m->match = 0;
941 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
942 m->savings = regs->array[regno].n_times_set;
943 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
944 m->savings += libcall_benefit (p);
945 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
946 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
947 /* Add M to the end of the chain MOVABLES. */
948 loop_movables_add (movables, m);
950 if (m->consec > 0)
952 /* It is possible for the first instruction to have a
953 REG_EQUAL note but a non-invariant SET_SRC, so we must
954 remember the status of the first instruction in case
955 the last instruction doesn't have a REG_EQUAL note. */
956 m->move_insn_first = m->move_insn;
958 /* Skip this insn, not checking REG_LIBCALL notes. */
959 p = next_nonnote_insn (p);
960 /* Skip the consecutive insns, if there are any. */
961 p = skip_consec_insns (p, m->consec);
962 /* Back up to the last insn of the consecutive group. */
963 p = prev_nonnote_insn (p);
965 /* We must now reset m->move_insn, m->is_equiv, and
966 possibly m->set_src to correspond to the effects of
967 all the insns. */
968 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
969 if (temp)
970 m->set_src = XEXP (temp, 0), m->move_insn = 1;
971 else
973 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
974 if (temp && CONSTANT_P (XEXP (temp, 0)))
975 m->set_src = XEXP (temp, 0), m->move_insn = 1;
976 else
977 m->move_insn = 0;
980 m->is_equiv
981 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
984 /* If this register is always set within a STRICT_LOW_PART
985 or set to zero, then its high bytes are constant.
986 So clear them outside the loop and within the loop
987 just load the low bytes.
988 We must check that the machine has an instruction to do so.
989 Also, if the value loaded into the register
990 depends on the same register, this cannot be done. */
991 else if (SET_SRC (set) == const0_rtx
992 && GET_CODE (NEXT_INSN (p)) == INSN
993 && (set1 = single_set (NEXT_INSN (p)))
994 && GET_CODE (set1) == SET
995 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
996 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
997 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
998 == SET_DEST (set))
999 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1001 int regno = REGNO (SET_DEST (set));
1002 if (regs->array[regno].set_in_loop == 2)
1004 struct movable *m;
1005 m = xmalloc (sizeof (struct movable));
1006 m->next = 0;
1007 m->insn = p;
1008 m->set_dest = SET_DEST (set);
1009 m->dependencies = 0;
1010 m->force = 0;
1011 m->consec = 0;
1012 m->done = 0;
1013 m->forces = 0;
1014 m->move_insn = 0;
1015 m->move_insn_first = 0;
1016 m->insert_temp = insert_temp;
1017 m->partial = 1;
1018 /* If the insn may not be executed on some cycles,
1019 we can't clear the whole reg; clear just high part.
1020 Not even if the reg is used only within this loop.
1021 Consider this:
1022 while (1)
1023 while (s != t) {
1024 if (foo ()) x = *s;
1025 use (x);
1027 Clearing x before the inner loop could clobber a value
1028 being saved from the last time around the outer loop.
1029 However, if the reg is not used outside this loop
1030 and all uses of the register are in the same
1031 basic block as the store, there is no problem.
1033 If this insn was made by loop, we don't know its
1034 INSN_LUID and hence must make a conservative
1035 assumption. */
1036 m->global = (INSN_UID (p) >= max_uid_for_loop
1037 || LOOP_REG_GLOBAL_P (loop, regno)
1038 || (labels_in_range_p
1039 (p, REGNO_FIRST_LUID (regno))));
1040 if (maybe_never && m->global)
1041 m->savemode = GET_MODE (SET_SRC (set1));
1042 else
1043 m->savemode = VOIDmode;
1044 m->regno = regno;
1045 m->cond = 0;
1046 m->match = 0;
1047 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1048 m->savings = 1;
1049 for (i = 0;
1050 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1051 i++)
1052 regs->array[regno+i].set_in_loop = -1;
1053 /* Add M to the end of the chain MOVABLES. */
1054 loop_movables_add (movables, m);
1059 /* Past a call insn, we get to insns which might not be executed
1060 because the call might exit. This matters for insns that trap.
1061 Constant and pure call insns always return, so they don't count. */
1062 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1063 call_passed = 1;
1064 /* Past a label or a jump, we get to insns for which we
1065 can't count on whether or how many times they will be
1066 executed during each iteration. Therefore, we can
1067 only move out sets of trivial variables
1068 (those not used after the loop). */
1069 /* Similar code appears twice in strength_reduce. */
1070 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1071 /* If we enter the loop in the middle, and scan around to the
1072 beginning, don't set maybe_never for that. This must be an
1073 unconditional jump, otherwise the code at the top of the
1074 loop might never be executed. Unconditional jumps are
1075 followed by a barrier then the loop_end. */
1076 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1077 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1078 && any_uncondjump_p (p)))
1079 maybe_never = 1;
1080 else if (GET_CODE (p) == NOTE)
1082 /* At the virtual top of a converted loop, insns are again known to
1083 be executed: logically, the loop begins here even though the exit
1084 code has been duplicated. */
1085 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1086 maybe_never = call_passed = 0;
1087 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1088 loop_depth++;
1089 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1090 loop_depth--;
1094 /* If one movable subsumes another, ignore that other. */
1096 ignore_some_movables (movables);
1098 /* For each movable insn, see if the reg that it loads
1099 leads, when it dies, right into another conditionally movable insn.
1100 If so, record that the second insn "forces" the first one,
1101 since the second can be moved only if the first is. */
1103 force_movables (movables);
1105 /* See if there are multiple movable insns that load the same value.
1106 If there are, make all but the first point at the first one
1107 through the `match' field, and add the priorities of them
1108 all together as the priority of the first. */
1110 combine_movables (movables, regs);
1112 /* Now consider each movable insn to decide whether it is worth moving.
1113 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1115 For machines with few registers this increases code size, so do not
1116 move movables when optimizing for code size on such machines.
1117 (The 18 below is the value for i386.) */
1119 if (!optimize_size
1120 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1122 move_movables (loop, movables, threshold, insn_count);
1124 /* Recalculate regs->array if move_movables has created new
1125 registers. */
1126 if (max_reg_num () > regs->num)
1128 loop_regs_scan (loop, 0);
1129 for (update_start = loop_start;
1130 PREV_INSN (update_start)
1131 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1132 update_start = PREV_INSN (update_start))
1134 update_end = NEXT_INSN (loop_end);
1136 reg_scan_update (update_start, update_end, loop_max_reg);
1137 loop_max_reg = max_reg_num ();
1141 /* Now candidates that still are negative are those not moved.
1142 Change regs->array[I].set_in_loop to indicate that those are not actually
1143 invariant. */
1144 for (i = 0; i < regs->num; i++)
1145 if (regs->array[i].set_in_loop < 0)
1146 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1148 /* Now that we've moved some things out of the loop, we might be able to
1149 hoist even more memory references. */
1150 load_mems (loop);
1152 /* Recalculate regs->array if load_mems has created new registers. */
1153 if (max_reg_num () > regs->num)
1154 loop_regs_scan (loop, 0);
1156 for (update_start = loop_start;
1157 PREV_INSN (update_start)
1158 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1159 update_start = PREV_INSN (update_start))
1161 update_end = NEXT_INSN (loop_end);
1163 reg_scan_update (update_start, update_end, loop_max_reg);
1164 loop_max_reg = max_reg_num ();
1166 if (flag_strength_reduce)
1168 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1169 /* Ensure our label doesn't go away. */
1170 LABEL_NUSES (update_end)++;
1172 strength_reduce (loop, flags);
1174 reg_scan_update (update_start, update_end, loop_max_reg);
1175 loop_max_reg = max_reg_num ();
1177 if (update_end && GET_CODE (update_end) == CODE_LABEL
1178 && --LABEL_NUSES (update_end) == 0)
1179 delete_related_insns (update_end);
1183 /* The movable information is required for strength reduction. */
1184 loop_movables_free (movables);
1186 free (regs->array);
1187 regs->array = 0;
1188 regs->num = 0;
1191 /* Add elements to *OUTPUT to record all the pseudo-regs
1192 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1194 void
1195 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1197 enum rtx_code code;
1198 const char *fmt;
1199 int i;
1201 code = GET_CODE (in_this);
1203 switch (code)
1205 case PC:
1206 case CC0:
1207 case CONST_INT:
1208 case CONST_DOUBLE:
1209 case CONST:
1210 case SYMBOL_REF:
1211 case LABEL_REF:
1212 return;
1214 case REG:
1215 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1216 && ! reg_mentioned_p (in_this, not_in_this))
1217 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1218 return;
1220 default:
1221 break;
1224 fmt = GET_RTX_FORMAT (code);
1225 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1227 int j;
1229 switch (fmt[i])
1231 case 'E':
1232 for (j = 0; j < XVECLEN (in_this, i); j++)
1233 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1234 break;
1236 case 'e':
1237 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1238 break;
1243 /* Check what regs are referred to in the libcall block ending with INSN,
1244 aside from those mentioned in the equivalent value.
1245 If there are none, return 0.
1246 If there are one or more, return an EXPR_LIST containing all of them. */
1249 libcall_other_reg (rtx insn, rtx equiv)
1251 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1252 rtx p = XEXP (note, 0);
1253 rtx output = 0;
1255 /* First, find all the regs used in the libcall block
1256 that are not mentioned as inputs to the result. */
1258 while (p != insn)
1260 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1261 || GET_CODE (p) == CALL_INSN)
1262 record_excess_regs (PATTERN (p), equiv, &output);
1263 p = NEXT_INSN (p);
1266 return output;
1269 /* Return 1 if all uses of REG
1270 are between INSN and the end of the basic block. */
1272 static int
1273 reg_in_basic_block_p (rtx insn, rtx reg)
1275 int regno = REGNO (reg);
1276 rtx p;
1278 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1279 return 0;
1281 /* Search this basic block for the already recorded last use of the reg. */
1282 for (p = insn; p; p = NEXT_INSN (p))
1284 switch (GET_CODE (p))
1286 case NOTE:
1287 break;
1289 case INSN:
1290 case CALL_INSN:
1291 /* Ordinary insn: if this is the last use, we win. */
1292 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1293 return 1;
1294 break;
1296 case JUMP_INSN:
1297 /* Jump insn: if this is the last use, we win. */
1298 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1299 return 1;
1300 /* Otherwise, it's the end of the basic block, so we lose. */
1301 return 0;
1303 case CODE_LABEL:
1304 case BARRIER:
1305 /* It's the end of the basic block, so we lose. */
1306 return 0;
1308 default:
1309 break;
1313 /* The "last use" that was recorded can't be found after the first
1314 use. This can happen when the last use was deleted while
1315 processing an inner loop, this inner loop was then completely
1316 unrolled, and the outer loop is always exited after the inner loop,
1317 so that everything after the first use becomes a single basic block. */
1318 return 1;
1321 /* Compute the benefit of eliminating the insns in the block whose
1322 last insn is LAST. This may be a group of insns used to compute a
1323 value directly or can contain a library call. */
1325 static int
1326 libcall_benefit (rtx last)
1328 rtx insn;
1329 int benefit = 0;
1331 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1332 insn != last; insn = NEXT_INSN (insn))
1334 if (GET_CODE (insn) == CALL_INSN)
1335 benefit += 10; /* Assume at least this many insns in a library
1336 routine. */
1337 else if (GET_CODE (insn) == INSN
1338 && GET_CODE (PATTERN (insn)) != USE
1339 && GET_CODE (PATTERN (insn)) != CLOBBER)
1340 benefit++;
1343 return benefit;
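/* For example, a libcall block that contains a CALL_INSN and three other
   insns which are not USEs or CLOBBERs (LAST itself is not counted) gets
   a benefit of 10 + 3 = 13.  */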
1346 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1348 static rtx
1349 skip_consec_insns (rtx insn, int count)
1351 for (; count > 0; count--)
1353 rtx temp;
1355 /* If first insn of libcall sequence, skip to end. */
1356 /* Do this at start of loop, since INSN is guaranteed to
1357 be an insn here. */
1358 if (GET_CODE (insn) != NOTE
1359 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1360 insn = XEXP (temp, 0);
1363 insn = NEXT_INSN (insn);
1364 while (GET_CODE (insn) == NOTE);
1367 return insn;
1370 /* Ignore any movable whose insn falls within a libcall
1371 which is part of another movable.
1372 We make use of the fact that the movable for the libcall value
1373 was made later and so appears later on the chain. */
1375 static void
1376 ignore_some_movables (struct loop_movables *movables)
1378 struct movable *m, *m1;
1380 for (m = movables->head; m; m = m->next)
1382 /* Is this a movable for the value of a libcall? */
1383 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1384 if (note)
1386 rtx insn;
1387 /* Check for earlier movables inside that range,
1388 and mark them invalid. We cannot use LUIDs here because
1389 insns created by loop.c for prior loops don't have LUIDs.
1390 Rather than reject all such insns from movables, we just
1391 explicitly check each insn in the libcall (since invariant
1392 libcalls aren't that common). */
1393 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1394 for (m1 = movables->head; m1 != m; m1 = m1->next)
1395 if (m1->insn == insn)
1396 m1->done = 1;
1401 /* For each movable insn, see if the reg that it loads
1402 leads, when it dies, right into another conditionally movable insn.
1403 If so, record that the second insn "forces" the first one,
1404 since the second can be moved only if the first is. */
1406 static void
1407 force_movables (struct loop_movables *movables)
1409 struct movable *m, *m1;
1411 for (m1 = movables->head; m1; m1 = m1->next)
1412 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1413 if (!m1->partial && !m1->done)
1415 int regno = m1->regno;
1416 for (m = m1->next; m; m = m->next)
1417 /* ??? Could this be a bug? What if CSE caused the
1418 register of M1 to be used after this insn?
1419 Since CSE does not update regno_last_uid,
1420 this insn M->insn might not be where it dies.
1421 But very likely this doesn't matter; what matters is
1422 that M's reg is computed from M1's reg. */
1423 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1424 && !m->done)
1425 break;
1426 if (m != 0 && m->set_src == m1->set_dest
1427 /* If m->consec, m->set_src isn't valid. */
1428 && m->consec == 0)
1429 m = 0;
1431 /* Increase the priority of moving the first insn
1432 since it permits the second to be moved as well. */
1433 if (m != 0)
1435 m->forces = m1;
1436 m1->lifetime += m->lifetime;
1437 m1->savings += m->savings;
1442 /* Find invariant expressions that are equal and can be combined into
1443 one register. */
1445 static void
1446 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1448 struct movable *m;
1449 char *matched_regs = xmalloc (regs->num);
1450 enum machine_mode mode;
1452 /* Regs that are set more than once are not allowed to match
1453 or be matched. I'm no longer sure why not. */
1454 /* Only pseudo registers are allowed to match or be matched,
1455 since move_movables does not validate the change. */
1456 /* Perhaps testing m->consec_sets would be more appropriate here? */
1458 for (m = movables->head; m; m = m->next)
1459 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1460 && m->regno >= FIRST_PSEUDO_REGISTER
1461 && !m->insert_temp
1462 && !m->partial)
1464 struct movable *m1;
1465 int regno = m->regno;
1467 memset (matched_regs, 0, regs->num);
1468 matched_regs[regno] = 1;
1470 /* We want later insns to match the first one. Don't make the first
1471 one match any later ones. So start this loop at m->next. */
1472 for (m1 = m->next; m1; m1 = m1->next)
1473 if (m != m1 && m1->match == 0
1474 && !m1->insert_temp
1475 && regs->array[m1->regno].n_times_set == 1
1476 && m1->regno >= FIRST_PSEUDO_REGISTER
1477 /* A reg used outside the loop mustn't be eliminated. */
1478 && !m1->global
1479 /* A reg used for zero-extending mustn't be eliminated. */
1480 && !m1->partial
1481 && (matched_regs[m1->regno]
1484 /* Can combine regs with different modes loaded from the
1485 same constant only if the modes are the same or
1486 if both are integer modes with M wider or the same
1487 width as M1. The check for integer is redundant, but
1488 safe, since the only case of differing destination
1489 modes with equal sources is when both sources are
1490 VOIDmode, i.e., CONST_INT. */
1491 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1492 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1493 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1494 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1495 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1496 /* See if the source of M1 says it matches M. */
1497 && ((GET_CODE (m1->set_src) == REG
1498 && matched_regs[REGNO (m1->set_src)])
1499 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1500 movables, regs))))
1501 && ((m->dependencies == m1->dependencies)
1502 || rtx_equal_p (m->dependencies, m1->dependencies)))
1504 m->lifetime += m1->lifetime;
1505 m->savings += m1->savings;
1506 m1->done = 1;
1507 m1->match = m;
1508 matched_regs[m1->regno] = 1;
1512 /* Now combine the regs used for zero-extension.
1513 This can be done for those not marked `global'
1514 provided their lives don't overlap. */
1516 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1517 mode = GET_MODE_WIDER_MODE (mode))
1519 struct movable *m0 = 0;
1521 /* Combine all the registers for extension from mode MODE.
1522 Don't combine any that are used outside this loop. */
1523 for (m = movables->head; m; m = m->next)
1524 if (m->partial && ! m->global
1525 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1527 struct movable *m1;
1529 int first = REGNO_FIRST_LUID (m->regno);
1530 int last = REGNO_LAST_LUID (m->regno);
1532 if (m0 == 0)
1534 /* First one: don't check for overlap, just record it. */
1535 m0 = m;
1536 continue;
1539 /* Make sure they extend to the same mode.
1540 (Almost always true.) */
1541 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1542 continue;
1544 /* We already have one: check for overlap with those
1545 already combined together. */
1546 for (m1 = movables->head; m1 != m; m1 = m1->next)
1547 if (m1 == m0 || (m1->partial && m1->match == m0))
1548 if (! (REGNO_FIRST_LUID (m1->regno) > last
1549 || REGNO_LAST_LUID (m1->regno) < first))
1550 goto overlap;
1552 /* No overlap: we can combine this with the others. */
1553 m0->lifetime += m->lifetime;
1554 m0->savings += m->savings;
1555 m->done = 1;
1556 m->match = m0;
1558 overlap:
1563 /* Clean up. */
1564 free (matched_regs);
1567 /* Returns the number of movable instructions in LOOP that were not
1568 moved outside the loop. */
1570 static int
1571 num_unmoved_movables (const struct loop *loop)
1573 int num = 0;
1574 struct movable *m;
1576 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1577 if (!m->done)
1578 ++num;
1580 return num;
1584 /* Return 1 if regs X and Y will become the same if moved. */
1586 static int
1587 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1589 unsigned int xn = REGNO (x);
1590 unsigned int yn = REGNO (y);
1591 struct movable *mx, *my;
1593 for (mx = movables->head; mx; mx = mx->next)
1594 if (mx->regno == xn)
1595 break;
1597 for (my = movables->head; my; my = my->next)
1598 if (my->regno == yn)
1599 break;
1601 return (mx && my
1602 && ((mx->match == my->match && mx->match != 0)
1603 || mx->match == my
1604 || mx == my->match));
1607 /* Return 1 if X and Y are identical-looking rtx's.
1608 This is the Lisp function EQUAL for rtx arguments.
1610 If two registers are matching movables or a movable register and an
1611 equivalent constant, consider them equal. */
1613 static int
1614 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1615 struct loop_regs *regs)
1617 int i;
1618 int j;
1619 struct movable *m;
1620 enum rtx_code code;
1621 const char *fmt;
1623 if (x == y)
1624 return 1;
1625 if (x == 0 || y == 0)
1626 return 0;
1628 code = GET_CODE (x);
1630 /* If we have a register and a constant, they may sometimes be
1631 equal. */
1632 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1633 && CONSTANT_P (y))
1635 for (m = movables->head; m; m = m->next)
1636 if (m->move_insn && m->regno == REGNO (x)
1637 && rtx_equal_p (m->set_src, y))
1638 return 1;
1640 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1641 && CONSTANT_P (x))
1643 for (m = movables->head; m; m = m->next)
1644 if (m->move_insn && m->regno == REGNO (y)
1645 && rtx_equal_p (m->set_src, x))
1646 return 1;
1649 /* Otherwise, rtx's of different codes cannot be equal. */
1650 if (code != GET_CODE (y))
1651 return 0;
1653 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1654 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1656 if (GET_MODE (x) != GET_MODE (y))
1657 return 0;
1659 /* These three types of rtx's can be compared nonrecursively. */
1660 if (code == REG)
1661 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1663 if (code == LABEL_REF)
1664 return XEXP (x, 0) == XEXP (y, 0);
1665 if (code == SYMBOL_REF)
1666 return XSTR (x, 0) == XSTR (y, 0);
1668 /* Compare the elements. If any pair of corresponding elements
1669 fails to match, return 0 for the whole thing. */
1671 fmt = GET_RTX_FORMAT (code);
1672 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1674 switch (fmt[i])
1676 case 'w':
1677 if (XWINT (x, i) != XWINT (y, i))
1678 return 0;
1679 break;
1681 case 'i':
1682 if (XINT (x, i) != XINT (y, i))
1683 return 0;
1684 break;
1686 case 'E':
1687 /* Two vectors must have the same length. */
1688 if (XVECLEN (x, i) != XVECLEN (y, i))
1689 return 0;
1691 /* And the corresponding elements must match. */
1692 for (j = 0; j < XVECLEN (x, i); j++)
1693 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1694 movables, regs) == 0)
1695 return 0;
1696 break;
1698 case 'e':
1699 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1700 == 0)
1701 return 0;
1702 break;
1704 case 's':
1705 if (strcmp (XSTR (x, i), XSTR (y, i)))
1706 return 0;
1707 break;
1709 case 'u':
1710 /* These are just backpointers, so they don't matter. */
1711 break;
1713 case '0':
1714 break;
1716 /* It is believed that rtx's at this level will never
1717 contain anything but integers and other rtx's,
1718 except for within LABEL_REFs and SYMBOL_REFs. */
1719 default:
1720 abort ();
1723 return 1;
1726 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1727 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1728 references is incremented once for each added note. */
1730 static void
1731 add_label_notes (rtx x, rtx insns)
1733 enum rtx_code code = GET_CODE (x);
1734 int i, j;
1735 const char *fmt;
1736 rtx insn;
1738 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1740 /* This code used to ignore labels that referred to dispatch tables to
1741 avoid flow generating (slightly) worse code.
1743 We no longer ignore such label references (see LABEL_REF handling in
1744 mark_jump_label for additional information). */
1745 for (insn = insns; insn; insn = NEXT_INSN (insn))
1746 if (reg_mentioned_p (XEXP (x, 0), insn))
1748 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1749 REG_NOTES (insn));
1750 if (LABEL_P (XEXP (x, 0)))
1751 LABEL_NUSES (XEXP (x, 0))++;
1755 fmt = GET_RTX_FORMAT (code);
1756 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1758 if (fmt[i] == 'e')
1759 add_label_notes (XEXP (x, i), insns);
1760 else if (fmt[i] == 'E')
1761 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1762 add_label_notes (XVECEXP (x, i, j), insns);
1766 /* Scan MOVABLES, and move the insns that deserve to be moved.
1767 If two matching movables are combined, replace one reg with the
1768 other throughout. */
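/* As a user-level illustration (hypothetical source):

       for (i = 0; i < n; i++)
         a[i] = x * y;

   With x and y unchanged inside the loop, the insn computing x * y into
   a pseudo is a movable; moving it hoists the multiplication in front of
   the loop so it executes once instead of once per iteration.  If a
   second insn in the loop loads the same value x * y into a different
   pseudo, the two movables match and the second pseudo is replaced by
   the first throughout the loop via REG_MAP below. */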
1770 static void
1771 move_movables (struct loop *loop, struct loop_movables *movables,
1772 int threshold, int insn_count)
1774 struct loop_regs *regs = LOOP_REGS (loop);
1775 int nregs = regs->num;
1776 rtx new_start = 0;
1777 struct movable *m;
1778 rtx p;
1779 rtx loop_start = loop->start;
1780 rtx loop_end = loop->end;
1781 /* Map of pseudo-register replacements to handle combining
1782 when we move several insns that load the same value
1783 into different pseudo-registers. */
1784 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
1785 char *already_moved = xcalloc (nregs, sizeof (char));
1787 for (m = movables->head; m; m = m->next)
1789 /* Describe this movable insn. */
1791 if (loop_dump_stream)
1793 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1794 INSN_UID (m->insn), m->regno, m->lifetime);
1795 if (m->consec > 0)
1796 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1797 if (m->cond)
1798 fprintf (loop_dump_stream, "cond ");
1799 if (m->force)
1800 fprintf (loop_dump_stream, "force ");
1801 if (m->global)
1802 fprintf (loop_dump_stream, "global ");
1803 if (m->done)
1804 fprintf (loop_dump_stream, "done ");
1805 if (m->move_insn)
1806 fprintf (loop_dump_stream, "move-insn ");
1807 if (m->match)
1808 fprintf (loop_dump_stream, "matches %d ",
1809 INSN_UID (m->match->insn));
1810 if (m->forces)
1811 fprintf (loop_dump_stream, "forces %d ",
1812 INSN_UID (m->forces->insn));
1815 /* Ignore the insn if it's already done (it matched something else).
1816 Otherwise, see if it is now safe to move. */
1818 if (!m->done
1819 && (! m->cond
1820 || (1 == loop_invariant_p (loop, m->set_src)
1821 && (m->dependencies == 0
1822 || 1 == loop_invariant_p (loop, m->dependencies))
1823 && (m->consec == 0
1824 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1825 m->consec + 1,
1826 m->insn))))
1827 && (! m->forces || m->forces->done))
1829 int regno;
1830 rtx p;
1831 int savings = m->savings;
1833 /* We have an insn that is safe to move.
1834 Compute its desirability. */
1836 p = m->insn;
1837 regno = m->regno;
1839 if (loop_dump_stream)
1840 fprintf (loop_dump_stream, "savings %d ", savings);
1842 if (regs->array[regno].moved_once && loop_dump_stream)
1843 fprintf (loop_dump_stream, "halved since already moved ");
1845 /* An insn MUST be moved if we already moved something else
1846 which is safe only if this one is moved too: that is,
1847 if already_moved[REGNO] is nonzero. */
1849 /* An insn is desirable to move if the new lifetime of the
1850 register is no more than THRESHOLD times the old lifetime.
1851 If it's not desirable, it means the loop is so big
1852 that moving won't speed things up much,
1853 and it is liable to make register usage worse. */
1855 /* It is also desirable to move if it can be moved at no
1856 extra cost because something else was already moved. */
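/* With hypothetical numbers: if THRESHOLD is 2, the movable's savings
   value is 3, and its register's lifetime spans 10 insns, the product
   2 * 3 * 10 = 60 permits the move in any loop of at most 60 insns, or
   at most 30 insns if the register has already been moved out of
   another loop, since the product is then compared against twice the
   loop's insn count. */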
1858 if (already_moved[regno]
1859 || flag_move_all_movables
1860 || (threshold * savings * m->lifetime) >=
1861 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1862 || (m->forces && m->forces->done
1863 && regs->array[m->forces->regno].n_times_set == 1))
1865 int count;
1866 struct movable *m1;
1867 rtx first = NULL_RTX;
1868 rtx newreg = NULL_RTX;
1870 if (m->insert_temp)
1871 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1873 /* Now move the insns that set the reg. */
1875 if (m->partial && m->match)
1877 rtx newpat, i1;
1878 rtx r1, r2;
1879 /* Find the end of this chain of matching regs.
1880 Thus, we load each reg in the chain from that one reg.
1881 And that reg is loaded with 0 directly,
1882 since it has ->match == 0. */
1883 for (m1 = m; m1->match; m1 = m1->match);
1884 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1885 SET_DEST (PATTERN (m1->insn)));
1886 i1 = loop_insn_hoist (loop, newpat);
1888 /* Mark the moved, invariant reg as being allowed to
1889 share a hard reg with the other matching invariant. */
1890 REG_NOTES (i1) = REG_NOTES (m->insn);
1891 r1 = SET_DEST (PATTERN (m->insn));
1892 r2 = SET_DEST (PATTERN (m1->insn));
1893 regs_may_share
1894 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1895 gen_rtx_EXPR_LIST (VOIDmode, r2,
1896 regs_may_share));
1897 delete_insn (m->insn);
1899 if (new_start == 0)
1900 new_start = i1;
1902 if (loop_dump_stream)
1903 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1905 /* If we are to re-generate the item being moved with a
1906 new move insn, first delete what we have and then emit
1907 the move insn before the loop. */
1908 else if (m->move_insn)
1910 rtx i1, temp, seq;
1912 for (count = m->consec; count >= 0; count--)
1914 /* If this is the first insn of a library call sequence,
1915 something is very wrong. */
1916 if (GET_CODE (p) != NOTE
1917 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1918 abort ();
1920 /* If this is the last insn of a libcall sequence, then
1921 delete every insn in the sequence except the last.
1922 The last insn is handled in the normal manner. */
1923 if (GET_CODE (p) != NOTE
1924 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1926 temp = XEXP (temp, 0);
1927 while (temp != p)
1928 temp = delete_insn (temp);
1931 temp = p;
1932 p = delete_insn (p);
1934 /* simplify_giv_expr expects that it can walk the insns
1935 at m->insn forwards and see this old sequence we are
1936 tossing here. delete_insn does preserve the next
1937 pointers, but when we skip over a NOTE we must fix
1938 it up. Otherwise that code walks into the non-deleted
1939 insn stream. */
1940 while (p && GET_CODE (p) == NOTE)
1941 p = NEXT_INSN (temp) = NEXT_INSN (p);
1943 if (m->insert_temp)
1945 /* Replace the original insn with a move from
1946 our newly created temp. */
1947 start_sequence ();
1948 emit_move_insn (m->set_dest, newreg);
1949 seq = get_insns ();
1950 end_sequence ();
1951 emit_insn_before (seq, p);
1955 start_sequence ();
1956 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
1957 m->set_src);
1958 seq = get_insns ();
1959 end_sequence ();
1961 add_label_notes (m->set_src, seq);
1963 i1 = loop_insn_hoist (loop, seq);
1964 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1965 set_unique_reg_note (i1,
1966 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1967 m->set_src);
1969 if (loop_dump_stream)
1970 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1972 /* The more regs we move, the less we like moving them. */
1973 threshold -= 3;
1975 else
1977 for (count = m->consec; count >= 0; count--)
1979 rtx i1, temp;
1981 /* If first insn of libcall sequence, skip to end. */
1982 /* Do this at start of loop, since p is guaranteed to
1983 be an insn here. */
1984 if (GET_CODE (p) != NOTE
1985 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1986 p = XEXP (temp, 0);
1988 /* If last insn of libcall sequence, move all
1989 insns except the last before the loop. The last
1990 insn is handled in the normal manner. */
1991 if (GET_CODE (p) != NOTE
1992 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1994 rtx fn_address = 0;
1995 rtx fn_reg = 0;
1996 rtx fn_address_insn = 0;
1998 first = 0;
1999 for (temp = XEXP (temp, 0); temp != p;
2000 temp = NEXT_INSN (temp))
2002 rtx body;
2003 rtx n;
2004 rtx next;
2006 if (GET_CODE (temp) == NOTE)
2007 continue;
2009 body = PATTERN (temp);
2011 /* Find the next insn after TEMP,
2012 not counting USE or NOTE insns. */
2013 for (next = NEXT_INSN (temp); next != p;
2014 next = NEXT_INSN (next))
2015 if (! (GET_CODE (next) == INSN
2016 && GET_CODE (PATTERN (next)) == USE)
2017 && GET_CODE (next) != NOTE)
2018 break;
2020 /* If that is the call, this may be the insn
2021 that loads the function address.
2023 Extract the function address from the insn
2024 that loads it into a register.
2025 If this insn was cse'd, we get incorrect code.
2027 So emit a new move insn that copies the
2028 function address into the register that the
2029 call insn will use. flow.c will delete any
2030 redundant stores that we have created. */
2031 if (GET_CODE (next) == CALL_INSN
2032 && GET_CODE (body) == SET
2033 && GET_CODE (SET_DEST (body)) == REG
2034 && (n = find_reg_note (temp, REG_EQUAL,
2035 NULL_RTX)))
2037 fn_reg = SET_SRC (body);
2038 if (GET_CODE (fn_reg) != REG)
2039 fn_reg = SET_DEST (body);
2040 fn_address = XEXP (n, 0);
2041 fn_address_insn = temp;
2043 /* We have the call insn.
2044 If it uses the register we suspect it might,
2045 load it with the correct address directly. */
2046 if (GET_CODE (temp) == CALL_INSN
2047 && fn_address != 0
2048 && reg_referenced_p (fn_reg, body))
2049 loop_insn_emit_after (loop, 0, fn_address_insn,
2050 gen_move_insn
2051 (fn_reg, fn_address));
2053 if (GET_CODE (temp) == CALL_INSN)
2055 i1 = loop_call_insn_hoist (loop, body);
2056 /* Because the USAGE information potentially
2057 contains objects other than hard registers
2058 we need to copy it. */
2059 if (CALL_INSN_FUNCTION_USAGE (temp))
2060 CALL_INSN_FUNCTION_USAGE (i1)
2061 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2063 else
2064 i1 = loop_insn_hoist (loop, body);
2065 if (first == 0)
2066 first = i1;
2067 if (temp == fn_address_insn)
2068 fn_address_insn = i1;
2069 REG_NOTES (i1) = REG_NOTES (temp);
2070 REG_NOTES (temp) = NULL;
2071 delete_insn (temp);
2073 if (new_start == 0)
2074 new_start = first;
2076 if (m->savemode != VOIDmode)
2078 /* P sets REG to zero; but we should clear only
2079 the bits that are not covered by the mode
2080 m->savemode. */
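/* For example, if REG is SImode and m->savemode is QImode
   (GET_MODE_BITSIZE of 8), the mask below is (1 << 8) - 1 == 0xff, so
   the AND keeps the low byte of REG and zeroes the upper bits. */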
2081 rtx reg = m->set_dest;
2082 rtx sequence;
2083 rtx tem;
2085 start_sequence ();
2086 tem = expand_simple_binop
2087 (GET_MODE (reg), AND, reg,
2088 GEN_INT ((((HOST_WIDE_INT) 1
2089 << GET_MODE_BITSIZE (m->savemode)))
2090 - 1),
2091 reg, 1, OPTAB_LIB_WIDEN);
2092 if (tem == 0)
2093 abort ();
2094 if (tem != reg)
2095 emit_move_insn (reg, tem);
2096 sequence = get_insns ();
2097 end_sequence ();
2098 i1 = loop_insn_hoist (loop, sequence);
2100 else if (GET_CODE (p) == CALL_INSN)
2102 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2103 /* Because the USAGE information potentially
2104 contains objects other than hard registers
2105 we need to copy it. */
2106 if (CALL_INSN_FUNCTION_USAGE (p))
2107 CALL_INSN_FUNCTION_USAGE (i1)
2108 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2110 else if (count == m->consec && m->move_insn_first)
2112 rtx seq;
2113 /* The SET_SRC might not be invariant, so we must
2114 use the REG_EQUAL note. */
2115 start_sequence ();
2116 emit_move_insn (m->set_dest, m->set_src);
2117 seq = get_insns ();
2118 end_sequence ();
2120 add_label_notes (m->set_src, seq);
2122 i1 = loop_insn_hoist (loop, seq);
2123 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2124 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2125 : REG_EQUAL, m->set_src);
2127 else if (m->insert_temp)
2129 rtx *reg_map2 = xcalloc (REGNO (newreg),
2130 sizeof(rtx));
2131 reg_map2 [m->regno] = newreg;
2133 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2134 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2135 free (reg_map2);
2137 else
2138 i1 = loop_insn_hoist (loop, PATTERN (p));
2140 if (REG_NOTES (i1) == 0)
2142 REG_NOTES (i1) = REG_NOTES (p);
2143 REG_NOTES (p) = NULL;
2145 /* If there is a REG_EQUAL note present whose value
2146 is not loop invariant, then delete it, since it
2147 may cause problems with later optimization passes.
2148 It is possible for cse to create such notes
2149 as a result of record_jump_cond. */
2151 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2152 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2153 remove_note (i1, temp);
2156 if (new_start == 0)
2157 new_start = i1;
2159 if (loop_dump_stream)
2160 fprintf (loop_dump_stream, " moved to %d",
2161 INSN_UID (i1));
2163 /* If library call, now fix the REG_NOTES that contain
2164 insn pointers, namely REG_LIBCALL on FIRST
2165 and REG_RETVAL on I1. */
2166 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2168 XEXP (temp, 0) = first;
2169 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2170 XEXP (temp, 0) = i1;
2173 temp = p;
2174 delete_insn (p);
2175 p = NEXT_INSN (p);
2177 /* simplify_giv_expr expects that it can walk the insns
2178 at m->insn forwards and see this old sequence we are
2179 tossing here. delete_insn does preserve the next
2180 pointers, but when we skip over a NOTE we must fix
2181 it up. Otherwise that code walks into the non-deleted
2182 insn stream. */
2183 while (p && GET_CODE (p) == NOTE)
2184 p = NEXT_INSN (temp) = NEXT_INSN (p);
2186 if (m->insert_temp)
2188 rtx seq;
2189 /* Replace the original insn with a move from
2190 our newly created temp. */
2191 start_sequence ();
2192 emit_move_insn (m->set_dest, newreg);
2193 seq = get_insns ();
2194 end_sequence ();
2195 emit_insn_before (seq, p);
2199 /* The more regs we move, the less we like moving them. */
2200 threshold -= 3;
2203 m->done = 1;
2205 if (!m->insert_temp)
2207 /* Any other movable that loads the same register
2208 MUST be moved. */
2209 already_moved[regno] = 1;
2211 /* This reg has been moved out of one loop. */
2212 regs->array[regno].moved_once = 1;
2214 /* The reg set here is now invariant. */
2215 if (! m->partial)
2217 int i;
2218 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2219 regs->array[regno+i].set_in_loop = 0;
2222 /* Change the length-of-life info for the register
2223 to say it lives at least the full length of this loop.
2224 This will help guide optimizations in outer loops. */
2226 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2227 /* This is the old insn before all the moved insns.
2228 We can't use the moved insn because it is out of range
2229 in uid_luid. Only the old insns have luids. */
2230 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2231 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2232 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2235 /* Combine with this moved insn any other matching movables. */
2237 if (! m->partial)
2238 for (m1 = movables->head; m1; m1 = m1->next)
2239 if (m1->match == m)
2241 rtx temp;
2243 /* Schedule the reg loaded by M1
2244 for replacement so that it shares the reg of M.
2245 If the modes differ (only possible in restricted
2246 circumstances), make a SUBREG.
2248 Note this assumes that the target dependent files
2249 treat REG and SUBREG equally, including within
2250 GO_IF_LEGITIMATE_ADDRESS and in all the
2251 predicates since we never verify that replacing the
2252 original register with a SUBREG results in a
2253 recognizable insn. */
2254 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2255 reg_map[m1->regno] = m->set_dest;
2256 else
2257 reg_map[m1->regno]
2258 = gen_lowpart_common (GET_MODE (m1->set_dest),
2259 m->set_dest);
2261 /* Get rid of the matching insn
2262 and prevent further processing of it. */
2263 m1->done = 1;
2265 /* If library call, delete all insns. */
2266 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2267 NULL_RTX)))
2268 delete_insn_chain (XEXP (temp, 0), m1->insn);
2269 else
2270 delete_insn (m1->insn);
2272 /* Any other movable that loads the same register
2273 MUST be moved. */
2274 already_moved[m1->regno] = 1;
2276 /* The reg merged here is now invariant,
2277 if the reg it matches is invariant. */
2278 if (! m->partial)
2280 int i;
2281 for (i = 0;
2282 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2283 i++)
2284 regs->array[m1->regno+i].set_in_loop = 0;
2288 else if (loop_dump_stream)
2289 fprintf (loop_dump_stream, "not desirable");
2291 else if (loop_dump_stream && !m->match)
2292 fprintf (loop_dump_stream, "not safe");
2294 if (loop_dump_stream)
2295 fprintf (loop_dump_stream, "\n");
2298 if (new_start == 0)
2299 new_start = loop_start;
2301 /* Go through all the instructions in the loop, making
2302 all the register substitutions scheduled in REG_MAP. */
2303 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2304 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2305 || GET_CODE (p) == CALL_INSN)
2307 replace_regs (PATTERN (p), reg_map, nregs, 0);
2308 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2309 INSN_CODE (p) = -1;
2312 /* Clean up. */
2313 free (reg_map);
2314 free (already_moved);
2318 static void
2319 loop_movables_add (struct loop_movables *movables, struct movable *m)
2321 if (movables->head == 0)
2322 movables->head = m;
2323 else
2324 movables->last->next = m;
2325 movables->last = m;
2329 static void
2330 loop_movables_free (struct loop_movables *movables)
2332 struct movable *m;
2333 struct movable *m_next;
2335 for (m = movables->head; m; m = m_next)
2337 m_next = m->next;
2338 free (m);
2342 #if 0
2343 /* Scan X and replace the address of any MEM in it with ADDR.
2344 REG is the address that MEM should have before the replacement. */
2346 static void
2347 replace_call_address (rtx x, rtx reg, rtx addr)
2349 enum rtx_code code;
2350 int i;
2351 const char *fmt;
2353 if (x == 0)
2354 return;
2355 code = GET_CODE (x);
2356 switch (code)
2358 case PC:
2359 case CC0:
2360 case CONST_INT:
2361 case CONST_DOUBLE:
2362 case CONST:
2363 case SYMBOL_REF:
2364 case LABEL_REF:
2365 case REG:
2366 return;
2368 case SET:
2369 /* Short cut for very common case. */
2370 replace_call_address (XEXP (x, 1), reg, addr);
2371 return;
2373 case CALL:
2374 /* Short cut for very common case. */
2375 replace_call_address (XEXP (x, 0), reg, addr);
2376 return;
2378 case MEM:
2379 /* If this MEM uses a reg other than the one we expected,
2380 something is wrong. */
2381 if (XEXP (x, 0) != reg)
2382 abort ();
2383 XEXP (x, 0) = addr;
2384 return;
2386 default:
2387 break;
2390 fmt = GET_RTX_FORMAT (code);
2391 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2393 if (fmt[i] == 'e')
2394 replace_call_address (XEXP (x, i), reg, addr);
2395 else if (fmt[i] == 'E')
2397 int j;
2398 for (j = 0; j < XVECLEN (x, i); j++)
2399 replace_call_address (XVECEXP (x, i, j), reg, addr);
2403 #endif
2405 /* Return the number of memory refs to addresses that vary
2406 in the rtx X. */
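/* For example, (mem:SI (plus:SI (reg 70) (reg 71))) counts as one
   nonfixed read when (reg 70) is modified inside the loop, since its
   address is then not invariant; a read from (mem:SI (symbol_ref "x"))
   contributes nothing.  (Hypothetical rtx shown for illustration.) */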
2408 static int
2409 count_nonfixed_reads (const struct loop *loop, rtx x)
2411 enum rtx_code code;
2412 int i;
2413 const char *fmt;
2414 int value;
2416 if (x == 0)
2417 return 0;
2419 code = GET_CODE (x);
2420 switch (code)
2422 case PC:
2423 case CC0:
2424 case CONST_INT:
2425 case CONST_DOUBLE:
2426 case CONST:
2427 case SYMBOL_REF:
2428 case LABEL_REF:
2429 case REG:
2430 return 0;
2432 case MEM:
2433 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2434 + count_nonfixed_reads (loop, XEXP (x, 0)));
2436 default:
2437 break;
2440 value = 0;
2441 fmt = GET_RTX_FORMAT (code);
2442 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2444 if (fmt[i] == 'e')
2445 value += count_nonfixed_reads (loop, XEXP (x, i));
2446 if (fmt[i] == 'E')
2448 int j;
2449 for (j = 0; j < XVECLEN (x, i); j++)
2450 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2453 return value;
2456 /* Scan a loop setting the elements `level', `has_call',
2457 `has_nonconst_call', `has_volatile', `has_tablejump',
2458 `unknown_address_altered', `unknown_constant_address_altered', and
2459 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2460 list `store_mems' in LOOP. */
2462 static void
2463 prescan_loop (struct loop *loop)
2465 int level = 1;
2466 rtx insn;
2467 struct loop_info *loop_info = LOOP_INFO (loop);
2468 rtx start = loop->start;
2469 rtx end = loop->end;
2470 /* The label after END. Jumping here is just like falling off the
2471 end of the loop. We use next_nonnote_insn instead of next_label
2472 as a hedge against the (pathological) case where some actual insn
2473 might end up between the two. */
2474 rtx exit_target = next_nonnote_insn (end);
2476 loop_info->has_indirect_jump = indirect_jump_in_function;
2477 loop_info->pre_header_has_call = 0;
2478 loop_info->has_call = 0;
2479 loop_info->has_nonconst_call = 0;
2480 loop_info->has_prefetch = 0;
2481 loop_info->has_volatile = 0;
2482 loop_info->has_tablejump = 0;
2483 loop_info->has_multiple_exit_targets = 0;
2484 loop->level = 1;
2486 loop_info->unknown_address_altered = 0;
2487 loop_info->unknown_constant_address_altered = 0;
2488 loop_info->store_mems = NULL_RTX;
2489 loop_info->first_loop_store_insn = NULL_RTX;
2490 loop_info->mems_idx = 0;
2491 loop_info->num_mem_sets = 0;
2492 /* If loop opts run twice, this was set on 1st pass for 2nd. */
2493 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2495 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2496 insn = PREV_INSN (insn))
2498 if (GET_CODE (insn) == CALL_INSN)
2500 loop_info->pre_header_has_call = 1;
2501 break;
2505 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2506 insn = NEXT_INSN (insn))
2508 switch (GET_CODE (insn))
2510 case NOTE:
2511 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2513 ++level;
2514 /* Count number of loops contained in this one. */
2515 loop->level++;
2517 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2518 --level;
2519 break;
2521 case CALL_INSN:
2522 if (! CONST_OR_PURE_CALL_P (insn))
2524 loop_info->unknown_address_altered = 1;
2525 loop_info->has_nonconst_call = 1;
2527 else if (pure_call_p (insn))
2528 loop_info->has_nonconst_call = 1;
2529 loop_info->has_call = 1;
2530 if (can_throw_internal (insn))
2531 loop_info->has_multiple_exit_targets = 1;
2533 /* Calls initializing constant objects have CLOBBER of MEM /u in the
2534 attached FUNCTION_USAGE expression list, not accounted for by the
2535 code above. We should note these to avoid missing dependencies in
2536 later references. */
2538 rtx fusage_entry;
2540 for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn);
2541 fusage_entry; fusage_entry = XEXP (fusage_entry, 1))
2543 rtx fusage = XEXP (fusage_entry, 0);
2545 if (GET_CODE (fusage) == CLOBBER
2546 && GET_CODE (XEXP (fusage, 0)) == MEM
2547 && RTX_UNCHANGING_P (XEXP (fusage, 0)))
2549 note_stores (fusage, note_addr_stored, loop_info);
2550 if (! loop_info->first_loop_store_insn
2551 && loop_info->store_mems)
2552 loop_info->first_loop_store_insn = insn;
2556 break;
2558 case JUMP_INSN:
2559 if (! loop_info->has_multiple_exit_targets)
2561 rtx set = pc_set (insn);
2563 if (set)
2565 rtx src = SET_SRC (set);
2566 rtx label1, label2;
2568 if (GET_CODE (src) == IF_THEN_ELSE)
2570 label1 = XEXP (src, 1);
2571 label2 = XEXP (src, 2);
2573 else
2575 label1 = src;
2576 label2 = NULL_RTX;
2581 if (label1 && label1 != pc_rtx)
2583 if (GET_CODE (label1) != LABEL_REF)
2585 /* Something tricky. */
2586 loop_info->has_multiple_exit_targets = 1;
2587 break;
2589 else if (XEXP (label1, 0) != exit_target
2590 && LABEL_OUTSIDE_LOOP_P (label1))
2592 /* A jump outside the current loop. */
2593 loop_info->has_multiple_exit_targets = 1;
2594 break;
2598 label1 = label2;
2599 label2 = NULL_RTX;
2601 while (label1);
2603 else
2605 /* A return, or something tricky. */
2606 loop_info->has_multiple_exit_targets = 1;
2609 /* FALLTHRU */
2611 case INSN:
2612 if (volatile_refs_p (PATTERN (insn)))
2613 loop_info->has_volatile = 1;
2615 if (GET_CODE (insn) == JUMP_INSN
2616 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2617 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2618 loop_info->has_tablejump = 1;
2620 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2621 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2622 loop_info->first_loop_store_insn = insn;
2624 if (flag_non_call_exceptions && can_throw_internal (insn))
2625 loop_info->has_multiple_exit_targets = 1;
2626 break;
2628 default:
2629 break;
2633 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2634 if (/* An exception thrown by a called function might land us
2635 anywhere. */
2636 ! loop_info->has_nonconst_call
2637 /* We don't want loads for MEMs moved to a location before the
2638 one at which their stack memory becomes allocated. (Note
2639 that this is not a problem for malloc, etc., since those
2640 require actual function calls.) */
2641 && ! current_function_calls_alloca
2642 /* There are ways to leave the loop other than falling off the
2643 end. */
2644 && ! loop_info->has_multiple_exit_targets)
2645 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2646 insn = NEXT_INSN (insn))
2647 for_each_rtx (&insn, insert_loop_mem, loop_info);
2649 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2650 that loop_invariant_p and load_mems can use true_dependence
2651 to determine what is really clobbered. */
2652 if (loop_info->unknown_address_altered)
2654 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2656 loop_info->store_mems
2657 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2659 if (loop_info->unknown_constant_address_altered)
2661 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2663 RTX_UNCHANGING_P (mem) = 1;
2664 loop_info->store_mems
2665 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2669 /* Invalidate all loops containing LABEL. */
2671 static void
2672 invalidate_loops_containing_label (rtx label)
2674 struct loop *loop;
2675 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2676 loop->invalid = 1;
2679 /* Scan the function looking for loops. Record the start and end of each loop.
2680 Also mark as invalid loops any loops that contain a setjmp or are branched
2681 to from outside the loop. */
2683 static void
2684 find_and_verify_loops (rtx f, struct loops *loops)
2686 rtx insn;
2687 rtx label;
2688 int num_loops;
2689 struct loop *current_loop;
2690 struct loop *next_loop;
2691 struct loop *loop;
2693 num_loops = loops->num;
2695 compute_luids (f, NULL_RTX, 0);
2697 /* If there are jumps to undefined labels,
2698 treat them as jumps out of any/all loops.
2699 This also avoids writing past end of tables when there are no loops. */
2700 uid_loop[0] = NULL;
2702 /* Find boundaries of loops, mark which loops are contained within
2703 loops, and invalidate loops that have setjmp. */
2705 num_loops = 0;
2706 current_loop = NULL;
2707 for (insn = f; insn; insn = NEXT_INSN (insn))
2709 if (GET_CODE (insn) == NOTE)
2710 switch (NOTE_LINE_NUMBER (insn))
2712 case NOTE_INSN_LOOP_BEG:
2713 next_loop = loops->array + num_loops;
2714 next_loop->num = num_loops;
2715 num_loops++;
2716 next_loop->start = insn;
2717 next_loop->outer = current_loop;
2718 current_loop = next_loop;
2719 break;
2721 case NOTE_INSN_LOOP_CONT:
2722 current_loop->cont = insn;
2723 break;
2725 case NOTE_INSN_LOOP_VTOP:
2726 current_loop->vtop = insn;
2727 break;
2729 case NOTE_INSN_LOOP_END:
2730 if (! current_loop)
2731 abort ();
2733 current_loop->end = insn;
2734 current_loop = current_loop->outer;
2735 break;
2737 default:
2738 break;
2741 if (GET_CODE (insn) == CALL_INSN
2742 && find_reg_note (insn, REG_SETJMP, NULL))
2744 /* In this case, we must invalidate our current loop and any
2745 enclosing loop. */
2746 for (loop = current_loop; loop; loop = loop->outer)
2748 loop->invalid = 1;
2749 if (loop_dump_stream)
2750 fprintf (loop_dump_stream,
2751 "\nLoop at %d ignored due to setjmp.\n",
2752 INSN_UID (loop->start));
2756 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2757 enclosing loop, but this doesn't matter. */
2758 uid_loop[INSN_UID (insn)] = current_loop;
2761 /* Any loop containing a label used in an initializer must be invalidated,
2762 because it can be jumped into from anywhere. */
2763 for (label = forced_labels; label; label = XEXP (label, 1))
2764 invalidate_loops_containing_label (XEXP (label, 0));
2766 /* Any loop containing a label used for an exception handler must be
2767 invalidated, because it can be jumped into from anywhere. */
2768 for_each_eh_label (invalidate_loops_containing_label);
2770 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2771 loop that it is not contained within, that loop is marked invalid.
2772 If any INSN or CALL_INSN uses a label's address, then the loop containing
2773 that label is marked invalid, because it could be jumped into from
2774 anywhere.
2776 Also look for blocks of code ending in an unconditional branch that
2777 exits the loop. If such a block is surrounded by a conditional
2778 branch around the block, move the block elsewhere (see below) and
2779 invert the jump to point to the code block. This may eliminate a
2780 label in our loop and will simplify processing by both us and a
2781 possible second cse pass. */
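/* Schematically (illustrative pseudo-assembly, not actual RTL):

       if (cond) goto L1        ; conditional branch around the block
       <block B>
       goto LOOP_EXIT           ; unconditional branch out of the loop
   L1: ...rest of the loop...

   becomes, once <block B> is relocated next to a BARRIER outside the
   loop:

       if (!cond) goto L_B      ; inverted branch, targets the moved block
   L1: ...rest of the loop...   ; L1 is deleted if no longer referenced
       ...
   L_B: <block B>
       goto LOOP_EXIT           ; deleted if it now jumps to the next insn
*/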
2783 for (insn = f; insn; insn = NEXT_INSN (insn))
2784 if (INSN_P (insn))
2786 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2788 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2790 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2791 if (note)
2792 invalidate_loops_containing_label (XEXP (note, 0));
2795 if (GET_CODE (insn) != JUMP_INSN)
2796 continue;
2798 mark_loop_jump (PATTERN (insn), this_loop);
2800 /* See if this is an unconditional branch outside the loop. */
2801 if (this_loop
2802 && (GET_CODE (PATTERN (insn)) == RETURN
2803 || (any_uncondjump_p (insn)
2804 && onlyjump_p (insn)
2805 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2806 != this_loop)))
2807 && get_max_uid () < max_uid_for_loop)
2809 rtx p;
2810 rtx our_next = next_real_insn (insn);
2811 rtx last_insn_to_move = NEXT_INSN (insn);
2812 struct loop *dest_loop;
2813 struct loop *outer_loop = NULL;
2815 /* Go backwards until we reach the start of the loop, a label,
2816 or a JUMP_INSN. */
2817 for (p = PREV_INSN (insn);
2818 GET_CODE (p) != CODE_LABEL
2819 && ! (GET_CODE (p) == NOTE
2820 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2821 && GET_CODE (p) != JUMP_INSN;
2822 p = PREV_INSN (p))
2825 /* Check for the case where we have a jump to an inner nested
2826 loop, and do not perform the optimization in that case. */
2828 if (JUMP_LABEL (insn))
2830 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2831 if (dest_loop)
2833 for (outer_loop = dest_loop; outer_loop;
2834 outer_loop = outer_loop->outer)
2835 if (outer_loop == this_loop)
2836 break;
2840 /* Make sure that the target of P is within the current loop. */
2842 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2843 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2844 outer_loop = this_loop;
2846 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2847 we have a block of code to try to move.
2849 We look backward and then forward from the target of INSN
2850 to find a BARRIER at the same loop depth as the target.
2851 If we find such a BARRIER, we make a new label for the start
2852 of the block, invert the jump in P and point it to that label,
2853 and move the block of code to the spot we found. */
2855 if (! outer_loop
2856 && GET_CODE (p) == JUMP_INSN
2857 && JUMP_LABEL (p) != 0
2858 /* Just ignore jumps to labels that were never emitted.
2859 These always indicate compilation errors. */
2860 && INSN_UID (JUMP_LABEL (p)) != 0
2861 && any_condjump_p (p) && onlyjump_p (p)
2862 && next_real_insn (JUMP_LABEL (p)) == our_next
2863 /* If it's not safe to move the sequence, then we
2864 mustn't try. */
2865 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2866 &last_insn_to_move))
2868 rtx target
2869 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2870 struct loop *target_loop = uid_loop[INSN_UID (target)];
2871 rtx loc, loc2;
2872 rtx tmp;
2874 /* Search for possible garbage past the conditional jumps
2875 and look for the last barrier. */
2876 for (tmp = last_insn_to_move;
2877 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2878 if (GET_CODE (tmp) == BARRIER)
2879 last_insn_to_move = tmp;
2881 for (loc = target; loc; loc = PREV_INSN (loc))
2882 if (GET_CODE (loc) == BARRIER
2883 /* Don't move things inside a tablejump. */
2884 && ((loc2 = next_nonnote_insn (loc)) == 0
2885 || GET_CODE (loc2) != CODE_LABEL
2886 || (loc2 = next_nonnote_insn (loc2)) == 0
2887 || GET_CODE (loc2) != JUMP_INSN
2888 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2889 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2890 && uid_loop[INSN_UID (loc)] == target_loop)
2891 break;
2893 if (loc == 0)
2894 for (loc = target; loc; loc = NEXT_INSN (loc))
2895 if (GET_CODE (loc) == BARRIER
2896 /* Don't move things inside a tablejump. */
2897 && ((loc2 = next_nonnote_insn (loc)) == 0
2898 || GET_CODE (loc2) != CODE_LABEL
2899 || (loc2 = next_nonnote_insn (loc2)) == 0
2900 || GET_CODE (loc2) != JUMP_INSN
2901 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2902 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2903 && uid_loop[INSN_UID (loc)] == target_loop)
2904 break;
2906 if (loc)
2908 rtx cond_label = JUMP_LABEL (p);
2909 rtx new_label = get_label_after (p);
2911 /* Ensure our label doesn't go away. */
2912 LABEL_NUSES (cond_label)++;
2914 /* Verify that uid_loop is large enough and that
2915 we can invert P. */
2916 if (invert_jump (p, new_label, 1))
2918 rtx q, r;
2920 /* If no suitable BARRIER was found, create a suitable
2921 one before TARGET. Since TARGET is a fall through
2922 path, we'll need to insert a jump around our block
2923 and add a BARRIER before TARGET.
2925 This creates an extra unconditional jump outside
2926 the loop. However, the benefits of removing rarely
2927 executed instructions from inside the loop usually
2928 outweigh the cost of the extra unconditional jump
2929 outside the loop. */
2930 if (loc == 0)
2932 rtx temp;
2934 temp = gen_jump (JUMP_LABEL (insn));
2935 temp = emit_jump_insn_before (temp, target);
2936 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2937 LABEL_NUSES (JUMP_LABEL (insn))++;
2938 loc = emit_barrier_before (target);
2941 /* Include the BARRIER after INSN and copy the
2942 block after LOC. */
2943 if (squeeze_notes (&new_label, &last_insn_to_move))
2944 abort ();
2945 reorder_insns (new_label, last_insn_to_move, loc);
2947 /* All those insns are now in TARGET_LOOP. */
2948 for (q = new_label;
2949 q != NEXT_INSN (last_insn_to_move);
2950 q = NEXT_INSN (q))
2951 uid_loop[INSN_UID (q)] = target_loop;
2953 /* The label jumped to by INSN is no longer a loop
2954 exit. Unless INSN does not have a label (e.g.,
2955 it is a RETURN insn), search loop->exit_labels
2956 to find its label_ref, and remove it. Also turn
2957 off LABEL_OUTSIDE_LOOP_P bit. */
2958 if (JUMP_LABEL (insn))
2960 for (q = 0, r = this_loop->exit_labels;
2962 q = r, r = LABEL_NEXTREF (r))
2963 if (XEXP (r, 0) == JUMP_LABEL (insn))
2965 LABEL_OUTSIDE_LOOP_P (r) = 0;
2966 if (q)
2967 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2968 else
2969 this_loop->exit_labels = LABEL_NEXTREF (r);
2970 break;
2973 for (loop = this_loop; loop && loop != target_loop;
2974 loop = loop->outer)
2975 loop->exit_count--;
2977 /* If we didn't find it, then something is
2978 wrong. */
2979 if (! r)
2980 abort ();
2983 /* P is now a jump outside the loop, so it must be put
2984 in loop->exit_labels, and marked as such.
2985 The easiest way to do this is to just call
2986 mark_loop_jump again for P. */
2987 mark_loop_jump (PATTERN (p), this_loop);
2989 /* If INSN now jumps to the insn after it,
2990 delete INSN. */
2991 if (JUMP_LABEL (insn) != 0
2992 && (next_real_insn (JUMP_LABEL (insn))
2993 == next_real_insn (insn)))
2994 delete_related_insns (insn);
2997 /* Continue the loop after where the conditional
2998 branch used to jump, since the only branch insn
2999 in the block (if it still remains) is an inter-loop
3000 branch and hence needs no processing. */
3001 insn = NEXT_INSN (cond_label);
3003 if (--LABEL_NUSES (cond_label) == 0)
3004 delete_related_insns (cond_label);
3006 /* This loop will be continued with NEXT_INSN (insn). */
3007 insn = PREV_INSN (insn);
3014 /* If any label in X jumps to a loop different from LOOP and any of the
3015 loops it is contained in, mark the target loop invalid.
3017 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3019 static void
3020 mark_loop_jump (rtx x, struct loop *loop)
3022 struct loop *dest_loop;
3023 struct loop *outer_loop;
3024 int i;
3026 switch (GET_CODE (x))
3028 case PC:
3029 case USE:
3030 case CLOBBER:
3031 case REG:
3032 case MEM:
3033 case CONST_INT:
3034 case CONST_DOUBLE:
3035 case RETURN:
3036 return;
3038 case CONST:
3039 /* There could be a label reference in here. */
3040 mark_loop_jump (XEXP (x, 0), loop);
3041 return;
3043 case PLUS:
3044 case MINUS:
3045 case MULT:
3046 mark_loop_jump (XEXP (x, 0), loop);
3047 mark_loop_jump (XEXP (x, 1), loop);
3048 return;
3050 case LO_SUM:
3051 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3052 mark_loop_jump (XEXP (x, 1), loop);
3053 return;
3055 case SIGN_EXTEND:
3056 case ZERO_EXTEND:
3057 mark_loop_jump (XEXP (x, 0), loop);
3058 return;
3060 case LABEL_REF:
3061 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3063 /* Link together all labels that branch outside the loop. This
3064 is used by final_[bg]iv_value and the loop unrolling code. Also
3065 mark this LABEL_REF so we know that this branch should predict
3066 false. */
3068 /* A check to make sure the label is not in an inner nested loop,
3069 since this does not count as a loop exit. */
3070 if (dest_loop)
3072 for (outer_loop = dest_loop; outer_loop;
3073 outer_loop = outer_loop->outer)
3074 if (outer_loop == loop)
3075 break;
3077 else
3078 outer_loop = NULL;
3080 if (loop && ! outer_loop)
3082 LABEL_OUTSIDE_LOOP_P (x) = 1;
3083 LABEL_NEXTREF (x) = loop->exit_labels;
3084 loop->exit_labels = x;
3086 for (outer_loop = loop;
3087 outer_loop && outer_loop != dest_loop;
3088 outer_loop = outer_loop->outer)
3089 outer_loop->exit_count++;
3092 /* If this is inside a loop, but not in the current loop or one enclosed
3093 by it, it invalidates at least one loop. */
3095 if (! dest_loop)
3096 return;
3098 /* We must invalidate every nested loop containing the target of this
3099 label, except those that also contain the jump insn. */
3101 for (; dest_loop; dest_loop = dest_loop->outer)
3103 /* Stop when we reach a loop that also contains the jump insn. */
3104 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3105 if (dest_loop == outer_loop)
3106 return;
3108 /* If we get here, we know we need to invalidate a loop. */
3109 if (loop_dump_stream && ! dest_loop->invalid)
3110 fprintf (loop_dump_stream,
3111 "\nLoop at %d ignored due to multiple entry points.\n",
3112 INSN_UID (dest_loop->start));
3114 dest_loop->invalid = 1;
3116 return;
3118 case SET:
3119 /* If this is not setting pc, ignore. */
3120 if (SET_DEST (x) == pc_rtx)
3121 mark_loop_jump (SET_SRC (x), loop);
3122 return;
3124 case IF_THEN_ELSE:
3125 mark_loop_jump (XEXP (x, 1), loop);
3126 mark_loop_jump (XEXP (x, 2), loop);
3127 return;
3129 case PARALLEL:
3130 case ADDR_VEC:
3131 for (i = 0; i < XVECLEN (x, 0); i++)
3132 mark_loop_jump (XVECEXP (x, 0, i), loop);
3133 return;
3135 case ADDR_DIFF_VEC:
3136 for (i = 0; i < XVECLEN (x, 1); i++)
3137 mark_loop_jump (XVECEXP (x, 1, i), loop);
3138 return;
3140 default:
3141 /* Strictly speaking this is not a jump into the loop, only a possible
3142 jump out of the loop. However, we have no way to link the destination
3143 of this jump onto the list of exit labels. To be safe we mark this
3144 loop and any containing loops as invalid. */
3145 if (loop)
3147 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3149 if (loop_dump_stream && ! outer_loop->invalid)
3150 fprintf (loop_dump_stream,
3151 "\nLoop at %d ignored due to unknown exit jump.\n",
3152 INSN_UID (outer_loop->start));
3153 outer_loop->invalid = 1;
3156 return;
3160 /* Return nonzero if there is a label in the range from
3161 insn INSN to and including the insn whose luid is END.
3162 INSN must have an assigned luid (i.e., it must not have
3163 been previously created by loop.c). */
3165 static int
3166 labels_in_range_p (rtx insn, int end)
3168 while (insn && INSN_LUID (insn) <= end)
3170 if (GET_CODE (insn) == CODE_LABEL)
3171 return 1;
3172 insn = NEXT_INSN (insn);
3175 return 0;
3178 /* Record that a memory reference X is being set. */
3180 static void
3181 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3182 void *data ATTRIBUTE_UNUSED)
3184 struct loop_info *loop_info = data;
3186 if (x == 0 || GET_CODE (x) != MEM)
3187 return;
3189 /* Count number of memory writes.
3190 This affects heuristics in strength_reduce. */
3191 loop_info->num_mem_sets++;
3193 /* BLKmode MEM means all memory is clobbered. */
3194 if (GET_MODE (x) == BLKmode)
3196 if (RTX_UNCHANGING_P (x))
3197 loop_info->unknown_constant_address_altered = 1;
3198 else
3199 loop_info->unknown_address_altered = 1;
3201 return;
3204 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3205 loop_info->store_mems);
3208 /* X is a value modified by an INSN that references a biv inside a loop
3209 exit test (i.e., X is somehow related to the value of the biv). If X
3210 is a pseudo that is used more than once, then the biv is (effectively)
3211 used more than once. DATA is a pointer to a loop_regs structure. */
3213 static void
3214 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3216 struct loop_regs *regs = (struct loop_regs *) data;
3218 if (x == 0)
3219 return;
3221 while (GET_CODE (x) == STRICT_LOW_PART
3222 || GET_CODE (x) == SIGN_EXTRACT
3223 || GET_CODE (x) == ZERO_EXTRACT
3224 || GET_CODE (x) == SUBREG)
3225 x = XEXP (x, 0);
3227 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3228 return;
3230 /* If we do not have usage information, or if we know the register
3231 is used more than once, note that fact for check_dbra_loop. */
3232 if (REGNO (x) >= max_reg_before_loop
3233 || ! regs->array[REGNO (x)].single_usage
3234 || regs->array[REGNO (x)].single_usage == const0_rtx)
3235 regs->multiple_uses = 1;
3238 /* Return nonzero if the rtx X is invariant over the current loop.
3240 The value is 2 if we refer to something only conditionally invariant.
3242 A memory ref is invariant if it is not volatile and does not conflict
3243 with anything stored in `loop_info->store_mems'. */
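/* For example, a load from (mem:SI (symbol_ref "tbl")) is invariant
   provided it is not volatile and no MEM recorded in store_mems may
   alias it according to true_dependence; a pseudo REG is invariant when
   it is never set inside the loop (set_in_loop == 0) and only
   conditionally invariant (return value 2) when its set_in_loop count
   is negative.  ("tbl" is a hypothetical symbol for illustration.) */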
3246 loop_invariant_p (const struct loop *loop, rtx x)
3248 struct loop_info *loop_info = LOOP_INFO (loop);
3249 struct loop_regs *regs = LOOP_REGS (loop);
3250 int i;
3251 enum rtx_code code;
3252 const char *fmt;
3253 int conditional = 0;
3254 rtx mem_list_entry;
3256 if (x == 0)
3257 return 1;
3258 code = GET_CODE (x);
3259 switch (code)
3261 case CONST_INT:
3262 case CONST_DOUBLE:
3263 case SYMBOL_REF:
3264 case CONST:
3265 return 1;
3267 case LABEL_REF:
3268 /* A LABEL_REF is normally invariant, however, if we are unrolling
3269 loops, and this label is inside the loop, then it isn't invariant.
3270 This is because each unrolled copy of the loop body will have
3271 a copy of this label. If this was invariant, then an insn loading
3272 the address of this label into a register might get moved outside
3273 the loop, and then each loop body would end up using the same label.
3275 We don't know the loop bounds here though, so just fail for all
3276 labels. */
3277 if (flag_old_unroll_loops)
3278 return 0;
3279 else
3280 return 1;
3282 case PC:
3283 case CC0:
3284 case UNSPEC_VOLATILE:
3285 return 0;
3287 case REG:
3288 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3289 since the reg might be set by initialization within the loop. */
3291 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3292 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3293 && ! current_function_has_nonlocal_goto)
3294 return 1;
3296 if (LOOP_INFO (loop)->has_call
3297 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3298 return 0;
3300 /* Out-of-range regs can occur when we are called from unrolling.
3301 These have always been created by the unroller and are set in
3302 the loop, hence are never invariant. */
3304 if (REGNO (x) >= (unsigned) regs->num)
3305 return 0;
3307 if (regs->array[REGNO (x)].set_in_loop < 0)
3308 return 2;
3310 return regs->array[REGNO (x)].set_in_loop == 0;
3312 case MEM:
3313 /* Volatile memory references must be rejected. Do this before
3314 checking for read-only items, so that volatile read-only items
3315 will be rejected also. */
3316 if (MEM_VOLATILE_P (x))
3317 return 0;
3319 /* See if there is any dependence between a store and this load. */
3320 mem_list_entry = loop_info->store_mems;
3321 while (mem_list_entry)
3323 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3324 x, rtx_varies_p))
3325 return 0;
3327 mem_list_entry = XEXP (mem_list_entry, 1);
3330 /* It's not invalidated by a store in memory
3331 but we must still verify the address is invariant. */
3332 break;
3334 case ASM_OPERANDS:
3335 /* Don't mess with insns declared volatile. */
3336 if (MEM_VOLATILE_P (x))
3337 return 0;
3338 break;
3340 default:
3341 break;
3344 fmt = GET_RTX_FORMAT (code);
3345 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3347 if (fmt[i] == 'e')
3349 int tem = loop_invariant_p (loop, XEXP (x, i));
3350 if (tem == 0)
3351 return 0;
3352 if (tem == 2)
3353 conditional = 1;
3355 else if (fmt[i] == 'E')
3357 int j;
3358 for (j = 0; j < XVECLEN (x, i); j++)
3360 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3361 if (tem == 0)
3362 return 0;
3363 if (tem == 2)
3364 conditional = 1;
3370 return 1 + conditional;
3373 /* Return nonzero if all the insns in the loop that set REG
3374 are INSN and the immediately following insns,
3375 and if each of those insns sets REG in an invariant way
3376 (not counting uses of REG in them).
3378 The value is 2 if some of these insns are only conditionally invariant.
3380 We assume that INSN itself is the first set of REG
3381 and that its source is invariant. */
3383 static int
3384 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3385 rtx insn)
3387 struct loop_regs *regs = LOOP_REGS (loop);
3388 rtx p = insn;
3389 unsigned int regno = REGNO (reg);
3390 rtx temp;
3391 /* Number of sets we have to insist on finding after INSN. */
3392 int count = n_sets - 1;
3393 int old = regs->array[regno].set_in_loop;
3394 int value = 0;
3395 int this;
3397 /* If N_SETS hit the limit, we can't rely on its value. */
3398 if (n_sets == 127)
3399 return 0;
3401 regs->array[regno].set_in_loop = 0;
3403 while (count > 0)
3405 enum rtx_code code;
3406 rtx set;
3408 p = NEXT_INSN (p);
3409 code = GET_CODE (p);
3411 /* If library call, skip to end of it. */
3412 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3413 p = XEXP (temp, 0);
3415 this = 0;
3416 if (code == INSN
3417 && (set = single_set (p))
3418 && GET_CODE (SET_DEST (set)) == REG
3419 && REGNO (SET_DEST (set)) == regno)
3421 this = loop_invariant_p (loop, SET_SRC (set));
3422 if (this != 0)
3423 value |= this;
3424 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3426 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3427 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3428 notes are OK. */
3429 this = (CONSTANT_P (XEXP (temp, 0))
3430 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3431 && loop_invariant_p (loop, XEXP (temp, 0))));
3432 if (this != 0)
3433 value |= this;
3436 if (this != 0)
3437 count--;
3438 else if (code != NOTE)
3440 regs->array[regno].set_in_loop = old;
3441 return 0;
3445 regs->array[regno].set_in_loop = old;
3446 /* If loop_invariant_p ever returned 2, we return 2. */
3447 return 1 + (value & 2);
3450 #if 0
3451 /* I don't think this condition is sufficient to allow INSN
3452 to be moved, so we no longer test it. */
3454 /* Return 1 if all insns in the basic block of INSN and following INSN
3455 that set REG are invariant according to TABLE. */
3457 static int
3458 all_sets_invariant_p (rtx reg, rtx insn, short *table)
3460 rtx p = insn;
3461 int regno = REGNO (reg);
3463 while (1)
3465 enum rtx_code code;
3466 p = NEXT_INSN (p);
3467 code = GET_CODE (p);
3468 if (code == CODE_LABEL || code == JUMP_INSN)
3469 return 1;
3470 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3471 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3472 && REGNO (SET_DEST (PATTERN (p))) == regno)
3474 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3475 return 0;
3479 #endif /* 0 */
3481 /* Look at all uses (not sets) of registers in X. For each, if it is
3482 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3483 a different insn, set USAGE[REGNO] to const0_rtx. */
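/* Thus after the scan, regs->array[R].single_usage is left at 0 if no
   use of R was seen, is the unique using insn if all uses occurred in
   one insn (possibly several times within it), and is const0_rtx once
   uses have been seen in two or more different insns. */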
3485 static void
3486 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3488 enum rtx_code code = GET_CODE (x);
3489 const char *fmt = GET_RTX_FORMAT (code);
3490 int i, j;
3492 if (code == REG)
3493 regs->array[REGNO (x)].single_usage
3494 = (regs->array[REGNO (x)].single_usage != 0
3495 && regs->array[REGNO (x)].single_usage != insn)
3496 ? const0_rtx : insn;
3498 else if (code == SET)
3500 /* Don't count SET_DEST if it is a REG; otherwise count things
3501 in SET_DEST because if a register is partially modified, it won't
3502 show up as a potential movable so we don't care how USAGE is set
3503 for it. */
3504 if (GET_CODE (SET_DEST (x)) != REG)
3505 find_single_use_in_loop (regs, insn, SET_DEST (x));
3506 find_single_use_in_loop (regs, insn, SET_SRC (x));
3508 else
3509 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3511 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3512 find_single_use_in_loop (regs, insn, XEXP (x, i));
3513 else if (fmt[i] == 'E')
3514 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3515 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3519 /* Count and record any set in X which is contained in INSN. Update
3520 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3521 in X. */
3523 static void
3524 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3526 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3527 /* Don't move a reg that has an explicit clobber.
3528 It's not worth the pain to try to do it correctly. */
3529 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3531 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3533 rtx dest = SET_DEST (x);
3534 while (GET_CODE (dest) == SUBREG
3535 || GET_CODE (dest) == ZERO_EXTRACT
3536 || GET_CODE (dest) == SIGN_EXTRACT
3537 || GET_CODE (dest) == STRICT_LOW_PART)
3538 dest = XEXP (dest, 0);
3539 if (GET_CODE (dest) == REG)
3541 int i;
3542 int regno = REGNO (dest);
3543 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3545 /* If this is the first setting of this reg
3546 in current basic block, and it was set before,
3547 it must be set in two basic blocks, so it cannot
3548 be moved out of the loop. */
3549 if (regs->array[regno].set_in_loop > 0
3550 && last_set[regno] == 0)
3551 regs->array[regno+i].may_not_optimize = 1;
3552 /* If this is not first setting in current basic block,
3553 see if reg was used in between previous one and this.
3554 If so, neither one can be moved. */
3555 if (last_set[regno] != 0
3556 && reg_used_between_p (dest, last_set[regno], insn))
3557 regs->array[regno+i].may_not_optimize = 1;
3558 if (regs->array[regno+i].set_in_loop < 127)
3559 ++regs->array[regno+i].set_in_loop;
3560 last_set[regno+i] = insn;
3566 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3567 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3568 contained in insn INSN is used by any insn that precedes INSN in
3569 cyclic order starting from the loop entry point.
3571 We don't want to use INSN_LUID here because if we restrict INSN to those
3572 that have a valid INSN_LUID, it means we cannot move an invariant out
3573 from an inner loop past two loops. */
3575 static int
3576 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3578 rtx reg = SET_DEST (set);
3579 rtx p;
3581 /* Scan forward checking for register usage. If we hit INSN, we
3582 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3583 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3585 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3586 return 1;
3588 if (p == loop->end)
3589 p = loop->start;
3592 return 0;
3596 /* Information we collect about arrays that we might want to prefetch. */
3597 struct prefetch_info
3599 struct iv_class *class; /* Class this prefetch is based on. */
3600 struct induction *giv; /* GIV this prefetch is based on. */
3601 rtx base_address; /* Start prefetching from this address plus
3602 index. */
3603 HOST_WIDE_INT index;
3604 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3605 iteration. */
3606 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3607 prefetch area in one iteration. */
3608 unsigned int total_bytes; /* Total bytes loop will access in this block.
3609 This is set only for loops with known
3610 iteration counts and is 0xffffffff
3611 otherwise. */
3612 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3613 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3614 unsigned int write : 1; /* 1 for read/write prefetches. */
3617 /* Data used by check_store function. */
3618 struct check_store_data
3620 rtx mem_address;
3621 int mem_write;
3624 static void check_store (rtx, rtx, void *);
3625 static void emit_prefetch_instructions (struct loop *);
3626 static int rtx_equal_for_prefetch_p (rtx, rtx);
3628 /* Set mem_write when mem_address is found. Used as callback to
3629 note_stores. */
3630 static void
3631 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3633 struct check_store_data *d = (struct check_store_data *) data;
3635 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3636 d->mem_write = 1;
3639 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3640 important to get some addresses combined. Later more sophisticated
3641 transformations can be added when necessary.
3643 ??? The same trick of swapping operands is done at several other places.
3644 It would be nice to develop some common way to handle this. */
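/* E.g. (plus:SI (reg 100) (reg 105)) and (plus:SI (reg 105) (reg 100))
   compare equal here, so two prefetch base addresses that differ only
   in operand order can be combined.  The register numbers are
   hypothetical. */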
3646 static int
3647 rtx_equal_for_prefetch_p (rtx x, rtx y)
3649 int i;
3650 int j;
3651 enum rtx_code code = GET_CODE (x);
3652 const char *fmt;
3654 if (x == y)
3655 return 1;
3656 if (code != GET_CODE (y))
3657 return 0;
3659 code = GET_CODE (x);
3661 if (GET_RTX_CLASS (code) == 'c')
3663 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3664 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3665 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3666 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3668 /* Compare the elements. If any pair of corresponding elements fails to
3669 match, return 0 for the whole thing. */
3671 fmt = GET_RTX_FORMAT (code);
3672 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3674 switch (fmt[i])
3676 case 'w':
3677 if (XWINT (x, i) != XWINT (y, i))
3678 return 0;
3679 break;
3681 case 'i':
3682 if (XINT (x, i) != XINT (y, i))
3683 return 0;
3684 break;
3686 case 'E':
3687 /* Two vectors must have the same length. */
3688 if (XVECLEN (x, i) != XVECLEN (y, i))
3689 return 0;
3691 /* And the corresponding elements must match. */
3692 for (j = 0; j < XVECLEN (x, i); j++)
3693 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3694 XVECEXP (y, i, j)) == 0)
3695 return 0;
3696 break;
3698 case 'e':
3699 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3700 return 0;
3701 break;
3703 case 's':
3704 if (strcmp (XSTR (x, i), XSTR (y, i)))
3705 return 0;
3706 break;
3708 case 'u':
3709 /* These are just backpointers, so they don't matter. */
3710 break;
3712 case '0':
3713 break;
3715 /* It is believed that rtx's at this level will never
3716 contain anything but integers and other rtx's,
3717 except for within LABEL_REFs and SYMBOL_REFs. */
3718 default:
3719 abort ();
3722 return 1;
3725 /* Remove constant addition value from the expression X (when present)
3726 and return it. */
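/* For example (values purely illustrative): given
(const (plus (symbol_ref "x") (const_int 16))) this returns 16 and
rewrites *X to (symbol_ref "x"); given (plus (reg 60) (const_int 8))
it returns 8 and leaves just (reg 60) in *X. */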
3728 static HOST_WIDE_INT
3729 remove_constant_addition (rtx *x)
3731 HOST_WIDE_INT addval = 0;
3732 rtx exp = *x;
3734 /* Avoid clobbering a shared CONST expression. */
3735 if (GET_CODE (exp) == CONST)
3737 if (GET_CODE (XEXP (exp, 0)) == PLUS
3738 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3739 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3741 *x = XEXP (XEXP (exp, 0), 0);
3742 return INTVAL (XEXP (XEXP (exp, 0), 1));
3744 return 0;
3747 if (GET_CODE (exp) == CONST_INT)
3749 addval = INTVAL (exp);
3750 *x = const0_rtx;
3753 /* For a PLUS expression, recurse on its operands. */
3754 else if (GET_CODE (exp) == PLUS)
3756 addval += remove_constant_addition (&XEXP (exp, 0));
3757 addval += remove_constant_addition (&XEXP (exp, 1));
3759 /* If an operand was constant and has been replaced by zero above,
3760 remove the extra zero from the expression. */
3761 if (XEXP (exp, 0) == const0_rtx)
3762 *x = XEXP (exp, 1);
3763 else if (XEXP (exp, 1) == const0_rtx)
3764 *x = XEXP (exp, 0);
3767 return addval;
3770 /* Attempt to identify accesses to arrays that are most likely to cause cache
3771 misses, and emit prefetch instructions a few prefetch blocks forward.
3773 To detect the arrays we use the GIV information that was collected by the
3774 strength reduction pass.
3776 The prefetch instructions are generated after the GIV information is done
3777 and before the strength reduction process. The new GIVs are injected into
3778 the strength reduction tables, so the prefetch addresses are optimized as
3779 well.
3781 GIVs are split into base address, stride, and constant addition values.
3782 GIVs with the same address, stride and close addition values are combined
3783 into a single prefetch. Writes to GIVs are also detected, so that write
3784 prefetches can be used for the blocks we write to, on machines that
3785 support write prefetches.
3787 Several heuristics are used to determine when to prefetch. They are
3788 controlled by defined symbols that can be overridden for each target. */
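/* As a rough illustration (the loop below is only an example): in

for (i = 0; i < n; i++)
sum += a[i];

the DEST_ADDR giv for a[i] splits into the base address of `a', a stride
equal to the element size times the biv increment, and a constant index
of 0; an access such as a[i + 1] in the same loop differs only in the
constant part and would be merged into the same prefetch. */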
3790 static void
3791 emit_prefetch_instructions (struct loop *loop)
3793 int num_prefetches = 0;
3794 int num_real_prefetches = 0;
3795 int num_real_write_prefetches = 0;
3796 int num_prefetches_before = 0;
3797 int num_write_prefetches_before = 0;
3798 int ahead = 0;
3799 int i;
3800 struct iv_class *bl;
3801 struct induction *iv;
3802 struct prefetch_info info[MAX_PREFETCHES];
3803 struct loop_ivs *ivs = LOOP_IVS (loop);
3805 if (!HAVE_prefetch)
3806 return;
3808 /* Consider only loops without calls. When a call is present, the loop is
3809 probably slow enough that the memory reads will not stall it. */
3810 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3812 if (loop_dump_stream)
3813 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3815 return;
3818 /* Don't prefetch in loops known to have few iterations. */
3819 if (PREFETCH_NO_LOW_LOOPCNT
3820 && LOOP_INFO (loop)->n_iterations
3821 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3823 if (loop_dump_stream)
3824 fprintf (loop_dump_stream,
3825 "Prefetch: ignoring loop: not enough iterations.\n");
3826 return;
3829 /* Search all induction variables and pick those interesting for the prefetch
3830 machinery. */
3831 for (bl = ivs->list; bl; bl = bl->next)
3833 struct induction *biv = bl->biv, *biv1;
3834 int basestride = 0;
3836 biv1 = biv;
3838 /* Expect all BIVs to be executed in each iteration. This makes our
3839 analysis more conservative. */
3840 while (biv1)
3842 /* Discard non-constant additions that we can't handle well yet, and
3843 BIVs that are executed multiple times; such BIVs ought to be
3844 handled in the nested loop. We accept not_every_iteration BIVs,
3845 since these only result in larger strides and make our
3846 heuristics more conservative. */
3847 if (GET_CODE (biv->add_val) != CONST_INT)
3849 if (loop_dump_stream)
3851 fprintf (loop_dump_stream,
3852 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3853 REGNO (biv->src_reg), INSN_UID (biv->insn));
3854 print_rtl (loop_dump_stream, biv->add_val);
3855 fprintf (loop_dump_stream, "\n");
3857 break;
3860 if (biv->maybe_multiple)
3862 if (loop_dump_stream)
3864 fprintf (loop_dump_stream,
3865 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3866 REGNO (biv->src_reg), INSN_UID (biv->insn));
3867 print_rtl (loop_dump_stream, biv->add_val);
3868 fprintf (loop_dump_stream, "\n");
3870 break;
3873 basestride += INTVAL (biv1->add_val);
3874 biv1 = biv1->next_iv;
3877 if (biv1 || !basestride)
3878 continue;
3880 for (iv = bl->giv; iv; iv = iv->next_iv)
3882 rtx address;
3883 rtx temp;
3884 HOST_WIDE_INT index = 0;
3885 int add = 1;
3886 HOST_WIDE_INT stride = 0;
3887 int stride_sign = 1;
3888 struct check_store_data d;
3889 const char *ignore_reason = NULL;
3890 int size = GET_MODE_SIZE (GET_MODE (iv));
3892 /* See whether an induction variable is interesting to us and if
3893 not, report the reason. */
3894 if (iv->giv_type != DEST_ADDR)
3895 ignore_reason = "giv is not a destination address";
3897 /* We are interested only in constant stride memory references
3898 in order to be able to compute density easily. */
3899 else if (GET_CODE (iv->mult_val) != CONST_INT)
3900 ignore_reason = "stride is not constant";
3902 else
3904 stride = INTVAL (iv->mult_val) * basestride;
3905 if (stride < 0)
3907 stride = -stride;
3908 stride_sign = -1;
3911 /* On some targets, reversed order prefetches are not
3912 worthwhile. */
3913 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3914 ignore_reason = "reversed order stride";
3916 /* Prefetch of accesses with an extreme stride might not be
3917 worthwhile, either. */
3918 else if (PREFETCH_NO_EXTREME_STRIDE
3919 && stride > PREFETCH_EXTREME_STRIDE)
3920 ignore_reason = "extreme stride";
3922 /* Ignore GIVs with varying add values; we can't predict the
3923 value for the next iteration. */
3924 else if (!loop_invariant_p (loop, iv->add_val))
3925 ignore_reason = "giv has varying add value";
3927 /* Ignore GIVs in the nested loops; they ought to have been
3928 handled already. */
3929 else if (iv->maybe_multiple)
3930 ignore_reason = "giv is in nested loop";
3933 if (ignore_reason != NULL)
3935 if (loop_dump_stream)
3936 fprintf (loop_dump_stream,
3937 "Prefetch: ignoring giv at %d: %s.\n",
3938 INSN_UID (iv->insn), ignore_reason);
3939 continue;
3942 /* Determine the pointer to the basic array we are examining. It is
3943 the sum of the BIV's initial value and the GIV's add_val. */
3944 address = copy_rtx (iv->add_val);
3945 temp = copy_rtx (bl->initial_value);
3947 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3948 index = remove_constant_addition (&address);
3950 d.mem_write = 0;
3951 d.mem_address = *iv->location;
3953 /* When the GIV is not always executed, we might be better off by
3954 not dirtying the cache pages. */
3955 if (PREFETCH_CONDITIONAL || iv->always_executed)
3956 note_stores (PATTERN (iv->insn), check_store, &d);
3957 else
3959 if (loop_dump_stream)
3960 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3961 INSN_UID (iv->insn), "in conditional code.");
3962 continue;
3965 /* Attempt to find another prefetch to the same array and see if we
3966 can merge this one. */
3967 for (i = 0; i < num_prefetches; i++)
3968 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3969 && stride == info[i].stride)
3971 /* If both access the same array (the same location,
3972 differing only by a small constant index), merge
3973 the prefetches. Just issue the later one; the earlier one
3974 will have been prefetched by the previous iteration.
3975 The artificial threshold should not be too small,
3976 but also not bigger than the small portion of memory usually
3977 traversed by a single loop. */
3978 if (index >= info[i].index
3979 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3981 info[i].write |= d.mem_write;
3982 info[i].bytes_accessed += size;
3983 info[i].index = index;
3984 info[i].giv = iv;
3985 info[i].class = bl;
3986 info[i].base_address = address;
3987 add = 0;
3988 break;
3991 if (index < info[i].index
3992 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3994 info[i].write |= d.mem_write;
3995 info[i].bytes_accessed += size;
3996 add = 0;
3997 break;
4001 /* Merging failed. */
4002 if (add)
4004 info[num_prefetches].giv = iv;
4005 info[num_prefetches].class = bl;
4006 info[num_prefetches].index = index;
4007 info[num_prefetches].stride = stride;
4008 info[num_prefetches].base_address = address;
4009 info[num_prefetches].write = d.mem_write;
4010 info[num_prefetches].bytes_accessed = size;
4011 num_prefetches++;
4012 if (num_prefetches >= MAX_PREFETCHES)
4014 if (loop_dump_stream)
4015 fprintf (loop_dump_stream,
4016 "Maximal number of prefetches exceeded.\n");
4017 return;
4023 for (i = 0; i < num_prefetches; i++)
4025 int density;
4027 /* Attempt to calculate the total number of bytes fetched by all
4028 iterations of the loop. Avoid overflow. */
4029 if (LOOP_INFO (loop)->n_iterations
4030 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4031 >= LOOP_INFO (loop)->n_iterations))
4032 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4033 else
4034 info[i].total_bytes = 0xffffffff;
4036 density = info[i].bytes_accessed * 100 / info[i].stride;
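/* For instance (numbers purely illustrative): 8 bytes accessed per
iteration with a stride of 32 gives a density of 25%, i.e. only a
quarter of each stride is actually touched. */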
4038 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4039 if (PREFETCH_ONLY_DENSE_MEM)
4040 if (density * 256 > PREFETCH_DENSE_MEM * 100
4041 && (info[i].total_bytes / PREFETCH_BLOCK
4042 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4044 info[i].prefetch_before_loop = 1;
4045 info[i].prefetch_in_loop
4046 = (info[i].total_bytes / PREFETCH_BLOCK
4047 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4049 else
4051 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4052 if (loop_dump_stream)
4053 fprintf (loop_dump_stream,
4054 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4055 INSN_UID (info[i].giv->insn), density);
4057 else
4058 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4060 /* Find how many prefetch instructions we'll use within the loop. */
4061 if (info[i].prefetch_in_loop != 0)
4063 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4064 / PREFETCH_BLOCK);
4065 num_real_prefetches += info[i].prefetch_in_loop;
4066 if (info[i].write)
4067 num_real_write_prefetches += info[i].prefetch_in_loop;
4071 /* Determine how many iterations ahead to prefetch within the loop, based
4072 on how many prefetches we currently expect to do within the loop. */
4073 if (num_real_prefetches != 0)
4075 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4077 if (loop_dump_stream)
4078 fprintf (loop_dump_stream,
4079 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4080 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4081 num_real_prefetches = 0, num_real_write_prefetches = 0;
4084 /* We'll also use AHEAD to determine how many prefetch instructions to
4085 emit before a loop, so don't leave it zero. */
4086 if (ahead == 0)
4087 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
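/* For example (illustrative numbers): if SIMULTANEOUS_PREFETCHES is 3 and
the loop needs 2 prefetch insns per iteration, AHEAD is 3 / 2 == 1, so
addresses are prefetched roughly one iteration in advance. */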
4089 for (i = 0; i < num_prefetches; i++)
4091 /* Update if we've decided not to prefetch anything within the loop. */
4092 if (num_real_prefetches == 0)
4093 info[i].prefetch_in_loop = 0;
4095 /* Find how many prefetch instructions we'll use before the loop. */
4096 if (info[i].prefetch_before_loop != 0)
4098 int n = info[i].total_bytes / PREFETCH_BLOCK;
4099 if (n > ahead)
4100 n = ahead;
4101 info[i].prefetch_before_loop = n;
4102 num_prefetches_before += n;
4103 if (info[i].write)
4104 num_write_prefetches_before += n;
4107 if (loop_dump_stream)
4109 if (info[i].prefetch_in_loop == 0
4110 && info[i].prefetch_before_loop == 0)
4111 continue;
4112 fprintf (loop_dump_stream, "Prefetch insn: %d",
4113 INSN_UID (info[i].giv->insn));
4114 fprintf (loop_dump_stream,
4115 "; in loop: %d; before: %d; %s\n",
4116 info[i].prefetch_in_loop,
4117 info[i].prefetch_before_loop,
4118 info[i].write ? "read/write" : "read only");
4119 fprintf (loop_dump_stream,
4120 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4121 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4122 info[i].bytes_accessed, info[i].total_bytes);
4123 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4124 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4125 info[i].index, info[i].stride);
4126 print_rtl (loop_dump_stream, info[i].base_address);
4127 fprintf (loop_dump_stream, "\n");
4131 if (num_real_prefetches + num_prefetches_before > 0)
4133 /* Record that this loop uses prefetch instructions. */
4134 LOOP_INFO (loop)->has_prefetch = 1;
4136 if (loop_dump_stream)
4138 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4139 num_real_prefetches, num_real_write_prefetches);
4140 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4141 num_prefetches_before, num_write_prefetches_before);
4145 for (i = 0; i < num_prefetches; i++)
4147 int y;
4149 for (y = 0; y < info[i].prefetch_in_loop; y++)
4151 rtx loc = copy_rtx (*info[i].giv->location);
4152 rtx insn;
4153 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4154 rtx before_insn = info[i].giv->insn;
4155 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4156 rtx seq;
4158 /* We can save some effort by offsetting the address on
4159 architectures with offsettable memory references. */
4160 if (offsettable_address_p (0, VOIDmode, loc))
4161 loc = plus_constant (loc, bytes_ahead);
4162 else
4164 rtx reg = gen_reg_rtx (Pmode);
4165 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4166 GEN_INT (bytes_ahead), reg,
4167 0, before_insn);
4168 loc = reg;
4171 start_sequence ();
4172 /* Make sure the address operand is valid for prefetch. */
4173 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4174 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4175 loc = force_reg (Pmode, loc);
4176 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4177 GEN_INT (3)));
4178 seq = get_insns ();
4179 end_sequence ();
4180 emit_insn_before (seq, before_insn);
4182 /* Check all insns emitted and record the new GIV
4183 information. */
4184 insn = NEXT_INSN (prev_insn);
4185 while (insn != before_insn)
4187 insn = check_insn_for_givs (loop, insn,
4188 info[i].giv->always_executed,
4189 info[i].giv->maybe_multiple);
4190 insn = NEXT_INSN (insn);
4194 if (PREFETCH_BEFORE_LOOP)
4196 /* Emit insns before the loop to fetch the first cache lines or,
4197 if we're not prefetching within the loop, everything we expect
4198 to need. */
4199 for (y = 0; y < info[i].prefetch_before_loop; y++)
4201 rtx reg = gen_reg_rtx (Pmode);
4202 rtx loop_start = loop->start;
4203 rtx init_val = info[i].class->initial_value;
4204 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4205 info[i].giv->add_val,
4206 GEN_INT (y * PREFETCH_BLOCK));
4208 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4209 non-constant INIT_VAL to have the same mode as REG, which
4210 in this case we know to be Pmode. */
4211 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4213 rtx seq;
4215 start_sequence ();
4216 init_val = convert_to_mode (Pmode, init_val, 0);
4217 seq = get_insns ();
4218 end_sequence ();
4219 loop_insn_emit_before (loop, 0, loop_start, seq);
4221 loop_iv_add_mult_emit_before (loop, init_val,
4222 info[i].giv->mult_val,
4223 add_val, reg, 0, loop_start);
4224 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4225 GEN_INT (3)),
4226 loop_start);
4231 return;
4234 /* Communication with routines called via `note_stores'. */
4236 static rtx note_insn;
4238 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4240 static rtx addr_placeholder;
4242 /* ??? Unfinished optimizations, and possible future optimizations,
4243 for the strength reduction code. */
4245 /* ??? The interaction of biv elimination, and recognition of 'constant'
4246 bivs, may cause problems. */
4248 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4249 performance problems.
4251 Perhaps don't eliminate things that can be combined with an addressing
4252 mode. Find all givs that have the same biv, mult_val, and add_val;
4253 then for each giv, check to see if its only use dies in a following
4254 memory address. If so, generate a new memory address and check to see
4255 if it is valid. If it is valid, then store the modified memory address,
4256 otherwise, mark the giv as not done so that it will get its own iv. */
4258 /* ??? Could try to optimize branches when it is known that a biv is always
4259 positive. */
4261 /* ??? When replacing a biv in a compare insn, we should replace it with the
4262 closest giv so that an optimized branch can still be recognized by the
4263 combiner, e.g. the VAX acb insn. */
4265 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4266 was rerun in loop_optimize whenever a register was added or moved.
4267 Also, some of the optimizations could be a little less conservative. */
4269 /* Scan the loop body and call FNCALL for each insn. In addition to the
4270 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4271 callback.
4273 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4274 least once for every loop iteration except for the last one.
4276 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4277 loop iteration. */
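/* For example, loop_bivs_find and loop_givs_find below use this with
check_insn_for_bivs and check_insn_for_givs, respectively, as FNCALL. */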
4279 void
4280 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4282 int not_every_iteration = 0;
4283 int maybe_multiple = 0;
4284 int past_loop_latch = 0;
4285 int loop_depth = 0;
4286 rtx p;
4288 /* If loop_scan_start points to the loop exit test, we have to be wary of
4289 subversive use of gotos inside expression statements. */
4290 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4291 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4293 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4294 for (p = next_insn_in_loop (loop, loop->scan_start);
4295 p != NULL_RTX;
4296 p = next_insn_in_loop (loop, p))
4298 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4300 /* Past CODE_LABEL, we get to insns that may be executed multiple
4301 times. The only way we can be sure that they can't is if every
4302 jump insn between here and the end of the loop either
4303 returns, exits the loop, is a jump to a location that is still
4304 behind the label, or is a jump to the loop start. */
4306 if (GET_CODE (p) == CODE_LABEL)
4308 rtx insn = p;
4310 maybe_multiple = 0;
4312 while (1)
4314 insn = NEXT_INSN (insn);
4315 if (insn == loop->scan_start)
4316 break;
4317 if (insn == loop->end)
4319 if (loop->top != 0)
4320 insn = loop->top;
4321 else
4322 break;
4323 if (insn == loop->scan_start)
4324 break;
4327 if (GET_CODE (insn) == JUMP_INSN
4328 && GET_CODE (PATTERN (insn)) != RETURN
4329 && (!any_condjump_p (insn)
4330 || (JUMP_LABEL (insn) != 0
4331 && JUMP_LABEL (insn) != loop->scan_start
4332 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4334 maybe_multiple = 1;
4335 break;
4340 /* Past a jump, we get to insns for which we can't count
4341 on whether they will be executed during each iteration. */
4342 /* This code appears twice in strength_reduce. There is also similar
4343 code in scan_loop. */
4344 if (GET_CODE (p) == JUMP_INSN
4345 /* If we enter the loop in the middle, and scan around to the
4346 beginning, don't set not_every_iteration for that.
4347 This can be any kind of jump, since we want to know if insns
4348 will be executed if the loop is executed. */
4349 && !(JUMP_LABEL (p) == loop->top
4350 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4351 && any_uncondjump_p (p))
4352 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4354 rtx label = 0;
4356 /* If this is a jump outside the loop, then it also doesn't
4357 matter. Check to see if the target of this branch is on the
4358 loop->exits_labels list. */
4360 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4361 if (XEXP (label, 0) == JUMP_LABEL (p))
4362 break;
4364 if (!label)
4365 not_every_iteration = 1;
4368 else if (GET_CODE (p) == NOTE)
4370 /* At the virtual top of a converted loop, insns are again known to
4371 be executed each iteration: logically, the loop begins here
4372 even though the exit code has been duplicated.
4374 Insns are also again known to be executed each iteration at
4375 the LOOP_CONT note. */
4376 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4377 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4378 && loop_depth == 0)
4379 not_every_iteration = 0;
4380 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4381 loop_depth++;
4382 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4383 loop_depth--;
4386 /* Note if we pass a loop latch. If we do, then we can not clear
4387 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4388 a loop since a jump before the last CODE_LABEL may have started
4389 a new loop iteration.
4391 Note that LOOP_TOP is only set for rotated loops and we need
4392 this check for all loops, so compare against the CODE_LABEL
4393 which immediately follows LOOP_START. */
4394 if (GET_CODE (p) == JUMP_INSN
4395 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4396 past_loop_latch = 1;
4398 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4399 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4400 or not an insn is known to be executed each iteration of the
4401 loop, whether or not any iterations are known to occur.
4403 Therefore, if we have just passed a label and have no more labels
4404 between here and the test insn of the loop, and we have not passed
4405 a jump to the top of the loop, then we know these insns will be
4406 executed each iteration. */
4408 if (not_every_iteration
4409 && !past_loop_latch
4410 && GET_CODE (p) == CODE_LABEL
4411 && no_labels_between_p (p, loop->end)
4412 && loop_insn_first_p (p, loop->cont))
4413 not_every_iteration = 0;
4417 static void
4418 loop_bivs_find (struct loop *loop)
4420 struct loop_regs *regs = LOOP_REGS (loop);
4421 struct loop_ivs *ivs = LOOP_IVS (loop);
4422 /* Temporary list pointers for traversing ivs->list. */
4423 struct iv_class *bl, **backbl;
4425 ivs->list = 0;
4427 for_each_insn_in_loop (loop, check_insn_for_bivs);
4429 /* Scan ivs->list to remove all regs that proved not to be bivs.
4430 Make a sanity check against regs->n_times_set. */
4431 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4433 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4434 /* The above happens if the register is modified by a subreg, etc. */
4435 /* Make sure it is not recognized as a basic induction var: */
4436 || regs->array[bl->regno].n_times_set != bl->biv_count
4437 /* If it is never incremented, it is an invariant that we decided not to
4438 move, so leave it alone. */
4439 || ! bl->incremented)
4441 if (loop_dump_stream)
4442 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4443 bl->regno,
4444 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4445 ? "not induction variable"
4446 : (! bl->incremented ? "never incremented"
4447 : "count error")));
4449 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4450 *backbl = bl->next;
4452 else
4454 backbl = &bl->next;
4456 if (loop_dump_stream)
4457 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4463 /* Determine how BIVS are initialized by looking through the pre-header
4464 extended basic block. */
4465 static void
4466 loop_bivs_init_find (struct loop *loop)
4468 struct loop_ivs *ivs = LOOP_IVS (loop);
4469 /* Temporary list pointers for traversing ivs->list. */
4470 struct iv_class *bl;
4471 int call_seen;
4472 rtx p;
4474 /* Find initial value for each biv by searching backwards from loop_start,
4475 halting at first label. Also record any test condition. */
4477 call_seen = 0;
4478 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4480 rtx test;
4482 note_insn = p;
4484 if (GET_CODE (p) == CALL_INSN)
4485 call_seen = 1;
4487 if (INSN_P (p))
4488 note_stores (PATTERN (p), record_initial, ivs);
4490 /* Record any test of a biv that branches around the loop if there is no
4491 store between it and the start of the loop. We only care about tests with
4492 constants and registers, and only certain of those. */
4493 if (GET_CODE (p) == JUMP_INSN
4494 && JUMP_LABEL (p) != 0
4495 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4496 && (test = get_condition_for_loop (loop, p)) != 0
4497 && GET_CODE (XEXP (test, 0)) == REG
4498 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4499 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4500 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4501 && bl->init_insn == 0)
4503 /* If an NE test, we have an initial value! */
4504 if (GET_CODE (test) == NE)
4506 bl->init_insn = p;
4507 bl->init_set = gen_rtx_SET (VOIDmode,
4508 XEXP (test, 0), XEXP (test, 1));
4510 else
4511 bl->initial_test = test;
4517 /* Look at each biv and see if we can say anything better about its
4518 initial value from any initializing insns set up above. (This is done
4519 in two passes to avoid missing SETs in a PARALLEL.) */
4520 static void
4521 loop_bivs_check (struct loop *loop)
4523 struct loop_ivs *ivs = LOOP_IVS (loop);
4524 /* Temporary list pointers for traversing ivs->list. */
4525 struct iv_class *bl;
4526 struct iv_class **backbl;
4528 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4530 rtx src;
4531 rtx note;
4533 if (! bl->init_insn)
4534 continue;
4536 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4537 is a constant, use that value. */
4538 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4539 && CONSTANT_P (XEXP (note, 0)))
4540 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4541 && CONSTANT_P (XEXP (note, 0))))
4542 src = XEXP (note, 0);
4543 else
4544 src = SET_SRC (bl->init_set);
4546 if (loop_dump_stream)
4547 fprintf (loop_dump_stream,
4548 "Biv %d: initialized at insn %d: initial value ",
4549 bl->regno, INSN_UID (bl->init_insn));
4551 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4552 || GET_MODE (src) == VOIDmode)
4553 && valid_initial_value_p (src, bl->init_insn,
4554 LOOP_INFO (loop)->pre_header_has_call,
4555 loop->start))
4557 bl->initial_value = src;
4559 if (loop_dump_stream)
4561 print_simple_rtl (loop_dump_stream, src);
4562 fputc ('\n', loop_dump_stream);
4565 /* If we can't make it a giv,
4566 let the biv keep its initial value of "itself". */
4567 else if (loop_dump_stream)
4568 fprintf (loop_dump_stream, "is complex\n");
4573 /* Search the loop for general induction variables. */
4575 static void
4576 loop_givs_find (struct loop* loop)
4578 for_each_insn_in_loop (loop, check_insn_for_givs);
4582 /* For each giv for which we still don't know whether or not it is
4583 replaceable, check to see if it is replaceable because its final value
4584 can be calculated. */
4586 static void
4587 loop_givs_check (struct loop *loop)
4589 struct loop_ivs *ivs = LOOP_IVS (loop);
4590 struct iv_class *bl;
4592 for (bl = ivs->list; bl; bl = bl->next)
4594 struct induction *v;
4596 for (v = bl->giv; v; v = v->next_iv)
4597 if (! v->replaceable && ! v->not_replaceable)
4598 check_final_value (loop, v);
4603 /* Return nonzero if it is possible to eliminate the biv BL provided
4604 all givs are reduced. This is possible if either the reg is not
4605 used outside the loop, or we can compute what its final value will
4606 be. */
4608 static int
4609 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4610 int threshold, int insn_count)
4612 /* For architectures with a decrement_and_branch_until_zero insn,
4613 don't do this if we put a REG_NONNEG note on the endtest for this
4614 biv. */
4616 #ifdef HAVE_decrement_and_branch_until_zero
4617 if (bl->nonneg)
4619 if (loop_dump_stream)
4620 fprintf (loop_dump_stream,
4621 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4622 return 0;
4624 #endif
4626 /* Check that biv is used outside loop or if it has a final value.
4627 Compare against bl->init_insn rather than loop->start. We aren't
4628 concerned with any uses of the biv between init_insn and
4629 loop->start since these won't be affected by the value of the biv
4630 elsewhere in the function, so long as init_insn doesn't use the
4631 biv itself. */
4633 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4634 && bl->init_insn
4635 && INSN_UID (bl->init_insn) < max_uid_for_loop
4636 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4637 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4638 || (bl->final_value = final_biv_value (loop, bl)))
4639 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4641 if (loop_dump_stream)
4643 fprintf (loop_dump_stream,
4644 "Cannot eliminate biv %d.\n",
4645 bl->regno);
4646 fprintf (loop_dump_stream,
4647 "First use: insn %d, last use: insn %d.\n",
4648 REGNO_FIRST_UID (bl->regno),
4649 REGNO_LAST_UID (bl->regno));
4651 return 0;
4655 /* Reduce each giv of BL that we have decided to reduce. */
4657 static void
4658 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
4660 struct induction *v;
4662 for (v = bl->giv; v; v = v->next_iv)
4664 struct induction *tv;
4665 if (! v->ignore && v->same == 0)
4667 int auto_inc_opt = 0;
4669 /* If the code for derived givs immediately below has already
4670 allocated a new_reg, we must keep it. */
4671 if (! v->new_reg)
4672 v->new_reg = gen_reg_rtx (v->mode);
4674 #ifdef AUTO_INC_DEC
4675 /* If the target has auto-increment addressing modes, and
4676 this is an address giv, then try to put the increment
4677 immediately after its use, so that flow can create an
4678 auto-increment addressing mode. */
4679 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4680 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4681 /* We don't handle reversed biv's because bl->biv->insn
4682 does not have a valid INSN_LUID. */
4683 && ! bl->reversed
4684 && v->always_executed && ! v->maybe_multiple
4685 && INSN_UID (v->insn) < max_uid_for_loop)
4687 /* If other giv's have been combined with this one, then
4688 this will work only if all uses of the other giv's occur
4689 before this giv's insn. This is difficult to check.
4691 We simplify this by looking for the common case where
4692 there is one DEST_REG giv, and this giv's insn is the
4693 last use of the dest_reg of that DEST_REG giv. If the
4694 increment occurs after the address giv, then we can
4695 perform the optimization. (Otherwise, the increment
4696 would have to go before other_giv, and we would not be
4697 able to combine it with the address giv to get an
4698 auto-inc address.) */
4699 if (v->combined_with)
4701 struct induction *other_giv = 0;
4703 for (tv = bl->giv; tv; tv = tv->next_iv)
4704 if (tv->same == v)
4706 if (other_giv)
4707 break;
4708 else
4709 other_giv = tv;
4711 if (! tv && other_giv
4712 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4713 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4714 == INSN_UID (v->insn))
4715 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4716 auto_inc_opt = 1;
4718 /* Check for case where increment is before the address
4719 giv. Do this test in "loop order". */
4720 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4721 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4722 || (INSN_LUID (bl->biv->insn)
4723 > INSN_LUID (loop->scan_start))))
4724 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4725 && (INSN_LUID (loop->scan_start)
4726 < INSN_LUID (bl->biv->insn))))
4727 auto_inc_opt = -1;
4728 else
4729 auto_inc_opt = 1;
4731 #ifdef HAVE_cc0
4733 rtx prev;
4735 /* We can't put an insn immediately after one setting
4736 cc0, or immediately before one using cc0. */
4737 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4738 || (auto_inc_opt == -1
4739 && (prev = prev_nonnote_insn (v->insn)) != 0
4740 && INSN_P (prev)
4741 && sets_cc0_p (PATTERN (prev))))
4742 auto_inc_opt = 0;
4744 #endif
4746 if (auto_inc_opt)
4747 v->auto_inc_opt = 1;
4749 #endif
4751 /* For each place where the biv is incremented, add an insn
4752 to increment the new, reduced reg for the giv. */
4753 for (tv = bl->biv; tv; tv = tv->next_iv)
4755 rtx insert_before;
4757 /* Skip if location is the same as a previous one. */
4758 if (tv->same)
4759 continue;
4760 if (! auto_inc_opt)
4761 insert_before = NEXT_INSN (tv->insn);
4762 else if (auto_inc_opt == 1)
4763 insert_before = NEXT_INSN (v->insn);
4764 else
4765 insert_before = v->insn;
4767 if (tv->mult_val == const1_rtx)
4768 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4769 v->new_reg, v->new_reg,
4770 0, insert_before);
4771 else /* tv->mult_val == const0_rtx */
4772 /* A multiply is acceptable here
4773 since this is presumed to be seldom executed. */
4774 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4775 v->add_val, v->new_reg,
4776 0, insert_before);
4779 /* Add code at loop start to initialize giv's reduced reg. */
4781 loop_iv_add_mult_hoist (loop,
4782 extend_value_for_giv (v, bl->initial_value),
4783 v->mult_val, v->add_val, v->new_reg);
4789 /* Check for givs whose first use is their definition and whose
4790 last use is the definition of another giv. If so, it is likely
4791 dead and should not be used to derive another giv nor to
4792 eliminate a biv. */
4794 static void
4795 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
4797 struct induction *v;
4799 for (v = bl->giv; v; v = v->next_iv)
4801 if (v->ignore
4802 || (v->same && v->same->ignore))
4803 continue;
4805 if (v->giv_type == DEST_REG
4806 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4808 struct induction *v1;
4810 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4811 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4812 v->maybe_dead = 1;
4818 static void
4819 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
4821 struct induction *v;
4823 for (v = bl->giv; v; v = v->next_iv)
4825 if (v->same && v->same->ignore)
4826 v->ignore = 1;
4828 if (v->ignore)
4829 continue;
4831 /* Update expression if this was combined, in case other giv was
4832 replaced. */
4833 if (v->same)
4834 v->new_reg = replace_rtx (v->new_reg,
4835 v->same->dest_reg, v->same->new_reg);
4837 /* See if this register is known to be a pointer to something. If
4838 so, see if we can find the alignment. First see if there is a
4839 destination register that is a pointer. If so, this shares the
4840 alignment too. Next see if we can deduce anything from the
4841 computational information. If not, and this is a DEST_ADDR
4842 giv, at least we know that it's a pointer, though we don't know
4843 the alignment. */
4844 if (GET_CODE (v->new_reg) == REG
4845 && v->giv_type == DEST_REG
4846 && REG_POINTER (v->dest_reg))
4847 mark_reg_pointer (v->new_reg,
4848 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4849 else if (GET_CODE (v->new_reg) == REG
4850 && REG_POINTER (v->src_reg))
4852 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4854 if (align == 0
4855 || GET_CODE (v->add_val) != CONST_INT
4856 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4857 align = 0;
4859 mark_reg_pointer (v->new_reg, align);
4861 else if (GET_CODE (v->new_reg) == REG
4862 && GET_CODE (v->add_val) == REG
4863 && REG_POINTER (v->add_val))
4865 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4867 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4868 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4869 align = 0;
4871 mark_reg_pointer (v->new_reg, align);
4873 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4874 mark_reg_pointer (v->new_reg, 0);
4876 if (v->giv_type == DEST_ADDR)
4877 /* Store reduced reg as the address in the memref where we found
4878 this giv. */
4879 validate_change (v->insn, v->location, v->new_reg, 0);
4880 else if (v->replaceable)
4882 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4884 else
4886 rtx original_insn = v->insn;
4887 rtx note;
4889 /* Not replaceable; emit an insn to set the original giv reg from
4890 the reduced giv, same as above. */
4891 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4892 gen_move_insn (v->dest_reg,
4893 v->new_reg));
4895 /* The original insn may have a REG_EQUAL note. This note is
4896 now incorrect and may result in invalid substitutions later.
4897 The original insn is dead, but may be part of a libcall
4898 sequence, which doesn't seem worth the bother of handling. */
4899 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4900 if (note)
4901 remove_note (original_insn, note);
4904 /* When a loop is reversed, givs which depend on the reversed
4905 biv, and which are live outside the loop, must be set to their
4906 correct final value. This insn is only needed if the giv is
4907 not replaceable. The correct final value is the same as the
4908 value that the giv starts the reversed loop with. */
4909 if (bl->reversed && ! v->replaceable)
4910 loop_iv_add_mult_sink (loop,
4911 extend_value_for_giv (v, bl->initial_value),
4912 v->mult_val, v->add_val, v->dest_reg);
4913 else if (v->final_value)
4914 loop_insn_sink_or_swim (loop,
4915 gen_load_of_final_value (v->dest_reg,
4916 v->final_value));
4918 if (loop_dump_stream)
4920 fprintf (loop_dump_stream, "giv at %d reduced to ",
4921 INSN_UID (v->insn));
4922 print_simple_rtl (loop_dump_stream, v->new_reg);
4923 fprintf (loop_dump_stream, "\n");
4929 static int
4930 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4931 struct iv_class *bl, struct induction *v,
4932 rtx test_reg)
4934 int add_cost;
4935 int benefit;
4937 benefit = v->benefit;
4938 PUT_MODE (test_reg, v->mode);
4939 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4940 test_reg, test_reg);
4942 /* Reduce benefit if not replaceable, since we will insert a
4943 move-insn to replace the insn that calculates this giv. Don't do
4944 this unless the giv is a user variable, since it will often be
4945 marked non-replaceable because of the duplication of the exit
4946 code outside the loop. In such a case, the copies we insert are
4947 dead and will be deleted. So they don't have a cost. Similar
4948 situations exist. */
4949 /* ??? The new final_[bg]iv_value code does a much better job of
4950 finding replaceable giv's, and hence this code may no longer be
4951 necessary. */
4952 if (! v->replaceable && ! bl->eliminable
4953 && REG_USERVAR_P (v->dest_reg))
4954 benefit -= copy_cost;
4956 /* Decrease the benefit to count the add-insns that we will insert
4957 to increment the reduced reg for the giv. ??? This can
4958 overestimate the run-time cost of the additional insns, e.g. if
4959 there are multiple basic blocks that increment the biv, but only
4960 one of these blocks is executed during each iteration. There is
4961 no good way to detect cases like this with the current structure
4962 of the loop optimizer. This code is more accurate for
4963 determining code size than run-time benefits. */
4964 benefit -= add_cost * bl->biv_count;
4966 /* Decide whether to strength-reduce this giv or to leave the code
4967 unchanged (recompute it from the biv each time it is used). This
4968 decision can be made independently for each giv. */
4970 #ifdef AUTO_INC_DEC
4971 /* Attempt to guess whether autoincrement will handle some of the
4972 new add insns; if so, increase BENEFIT (undo the subtraction of
4973 add_cost that was done above). */
4974 if (v->giv_type == DEST_ADDR
4975 /* Increasing the benefit is risky, since this is only a guess.
4976 Avoid increasing register pressure in cases where there would
4977 be no other benefit from reducing this giv. */
4978 && benefit > 0
4979 && GET_CODE (v->mult_val) == CONST_INT)
4981 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4983 if (HAVE_POST_INCREMENT
4984 && INTVAL (v->mult_val) == size)
4985 benefit += add_cost * bl->biv_count;
4986 else if (HAVE_PRE_INCREMENT
4987 && INTVAL (v->mult_val) == size)
4988 benefit += add_cost * bl->biv_count;
4989 else if (HAVE_POST_DECREMENT
4990 && -INTVAL (v->mult_val) == size)
4991 benefit += add_cost * bl->biv_count;
4992 else if (HAVE_PRE_DECREMENT
4993 && -INTVAL (v->mult_val) == size)
4994 benefit += add_cost * bl->biv_count;
4996 #endif
4998 return benefit;
5002 /* Free IV structures for LOOP. */
5004 static void
5005 loop_ivs_free (struct loop *loop)
5007 struct loop_ivs *ivs = LOOP_IVS (loop);
5008 struct iv_class *iv = ivs->list;
5010 free (ivs->regs);
5012 while (iv)
5014 struct iv_class *next = iv->next;
5015 struct induction *induction;
5016 struct induction *next_induction;
5018 for (induction = iv->biv; induction; induction = next_induction)
5020 next_induction = induction->next_iv;
5021 free (induction);
5023 for (induction = iv->giv; induction; induction = next_induction)
5025 next_induction = induction->next_iv;
5026 free (induction);
5029 free (iv);
5030 iv = next;
5035 /* Perform strength reduction and induction variable elimination.
5037 Pseudo registers created during this function will be beyond the
5038 last valid index in several tables including
5039 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5040 problem here, because the added registers cannot be givs outside of
5041 their loop, and hence will never be reconsidered. But scan_loop
5042 must check regnos to make sure they are in bounds. */
5044 static void
5045 strength_reduce (struct loop *loop, int flags)
5047 struct loop_info *loop_info = LOOP_INFO (loop);
5048 struct loop_regs *regs = LOOP_REGS (loop);
5049 struct loop_ivs *ivs = LOOP_IVS (loop);
5050 rtx p;
5051 /* Temporary list pointer for traversing ivs->list. */
5052 struct iv_class *bl;
5053 /* Ratio of extra register life span we can justify
5054 for saving an instruction. More if loop doesn't call subroutines
5055 since in that case saving an insn makes more difference
5056 and more registers are available. */
5057 /* ??? could set this to last value of threshold in move_movables */
5058 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
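/* E.g., on a target with 29 non-fixed registers (an illustrative count),
this gives 2 * 32 = 64 for a call-free loop and 1 * 32 = 32 when the
loop contains a call. */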
5059 /* Map of pseudo-register replacements. */
5060 rtx *reg_map = NULL;
5061 int reg_map_size;
5062 int unrolled_insn_copies = 0;
5063 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5064 int insn_count = count_insns_in_loop (loop);
5066 addr_placeholder = gen_reg_rtx (Pmode);
5068 ivs->n_regs = max_reg_before_loop;
5069 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
5071 /* Find all BIVs in loop. */
5072 loop_bivs_find (loop);
5074 /* Exit if there are no bivs. */
5075 if (! ivs->list)
5077 /* We can still unroll the loop anyway, but indicate that there is no
5078 strength reduction info available. */
5079 if (flags & LOOP_UNROLL)
5080 unroll_loop (loop, insn_count, 0);
5082 loop_ivs_free (loop);
5083 return;
5086 /* Determine how BIVS are initialized by looking through the pre-header
5087 extended basic block. */
5088 loop_bivs_init_find (loop);
5090 /* Look at each biv and see if we can say anything better about its
5091 initial value from any initializing insns set up above. */
5092 loop_bivs_check (loop);
5094 /* Search the loop for general induction variables. */
5095 loop_givs_find (loop);
5097 /* Try to calculate and save the number of loop iterations. This is
5098 set to zero if the actual number cannot be calculated. This must
5099 be called after all giv's have been identified, since otherwise it may
5100 fail if the iteration variable is a giv. */
5101 loop_iterations (loop);
5103 #ifdef HAVE_prefetch
5104 if (flags & LOOP_PREFETCH)
5105 emit_prefetch_instructions (loop);
5106 #endif
5108 /* Now for each giv for which we still don't know whether or not it is
5109 replaceable, check to see if it is replaceable because its final value
5110 can be calculated. This must be done after loop_iterations is called,
5111 so that final_giv_value will work correctly. */
5112 loop_givs_check (loop);
5114 /* Try to prove that the loop counter variable (if any) is always
5115 nonnegative; if so, record that fact with a REG_NONNEG note
5116 so that "decrement and branch until zero" insn can be used. */
5117 check_dbra_loop (loop, insn_count);
5119 /* Create reg_map to hold substitutions for replaceable giv regs.
5120 Some givs might have been made from biv increments, so look at
5121 ivs->reg_iv_type for a suitable size. */
5122 reg_map_size = ivs->n_regs;
5123 reg_map = xcalloc (reg_map_size, sizeof (rtx));
5125 /* Examine each iv class for feasibility of strength reduction/induction
5126 variable elimination. */
5128 for (bl = ivs->list; bl; bl = bl->next)
5130 struct induction *v;
5131 int benefit;
5133 /* Test whether it will be possible to eliminate this biv
5134 provided all givs are reduced. */
5135 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5137 /* This will be true at the end, if all givs which depend on this
5138 biv have been strength reduced.
5139 We can't (currently) eliminate the biv unless this is so. */
5140 bl->all_reduced = 1;
5142 /* Check each extension dependent giv in this class to see if its
5143 root biv is safe from wrapping in the interior mode. */
5144 check_ext_dependent_givs (loop, bl);
5146 /* Combine all giv's for this iv_class. */
5147 combine_givs (regs, bl);
5149 for (v = bl->giv; v; v = v->next_iv)
5151 struct induction *tv;
5153 if (v->ignore || v->same)
5154 continue;
5156 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5158 /* If an insn is not to be strength reduced, then set its ignore
5159 flag, and clear bl->all_reduced. */
5161 /* A giv that depends on a reversed biv must be reduced if it is
5162 used after the loop exit, otherwise, it would have the wrong
5163 value after the loop exit. To make it simple, just reduce all
5164 of such giv's whether or not we know they are used after the loop
5165 exit. */
5167 if (! flag_reduce_all_givs
5168 && v->lifetime * threshold * benefit < insn_count
5169 && ! bl->reversed)
5171 if (loop_dump_stream)
5172 fprintf (loop_dump_stream,
5173 "giv of insn %d not worth while, %d vs %d.\n",
5174 INSN_UID (v->insn),
5175 v->lifetime * threshold * benefit, insn_count);
5176 v->ignore = 1;
5177 bl->all_reduced = 0;
5179 else
5181 /* Check that we can increment the reduced giv without a
5182 multiply insn. If not, reject it. */
5184 for (tv = bl->biv; tv; tv = tv->next_iv)
5185 if (tv->mult_val == const1_rtx
5186 && ! product_cheap_p (tv->add_val, v->mult_val))
5188 if (loop_dump_stream)
5189 fprintf (loop_dump_stream,
5190 "giv of insn %d: would need a multiply.\n",
5191 INSN_UID (v->insn));
5192 v->ignore = 1;
5193 bl->all_reduced = 0;
5194 break;
5199 /* Check for givs whose first use is their definition and whose
5200 last use is the definition of another giv. If so, it is likely
5201 dead and should not be used to derive another giv nor to
5202 eliminate a biv. */
5203 loop_givs_dead_check (loop, bl);
5205 /* Reduce each giv that we decided to reduce. */
5206 loop_givs_reduce (loop, bl);
5208 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5209 as not reduced.
5211 For each giv register that can be reduced now: if replaceable,
5212 substitute reduced reg wherever the old giv occurs;
5213 else add new move insn "giv_reg = reduced_reg". */
5214 loop_givs_rescan (loop, bl, reg_map);
5216 /* All the givs based on the biv bl have been reduced if they
5217 merit it. */
5219 /* For each giv not marked as maybe dead that has been combined with a
5220 second giv, clear any "maybe dead" mark on that second giv.
5221 v->new_reg will either be or refer to the register of the giv it
5222 combined with.
5224 Doing this clearing avoids problems in biv elimination where
5225 a giv's new_reg is a complex value that can't be put in the
5226 insn but the giv combined with (with a reg as new_reg) is
5227 marked maybe_dead. Since the register will be used in either
5228 case, we'd prefer it be used from the simpler giv. */
5230 for (v = bl->giv; v; v = v->next_iv)
5231 if (! v->maybe_dead && v->same)
5232 v->same->maybe_dead = 0;
5234 /* Try to eliminate the biv, if it is a candidate.
5235 This won't work if ! bl->all_reduced,
5236 since the givs we planned to use might not have been reduced.
5238 We have to be careful that we didn't initially think we could
5239 eliminate this biv because of a giv that we now think may be
5240 dead and shouldn't be used as a biv replacement.
5242 Also, there is the possibility that we may have a giv that looks
5243 like it can be used to eliminate a biv, but the resulting insn
5244 isn't valid. This can happen, for example, on the 88k, where a
5245 JUMP_INSN can compare a register only with zero. Attempts to
5246 replace it with a compare with a constant will fail.
5248 Note that in cases where this call fails, we may have replaced some
5249 of the occurrences of the biv with a giv, but no harm was done in
5250 doing so in the rare cases where it can occur. */
5252 if (bl->all_reduced == 1 && bl->eliminable
5253 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5255 /* ?? If we created a new test to bypass the loop entirely,
5256 or otherwise drop straight in, based on this test, then
5257 we might want to rewrite it also. This way some later
5258 pass has more hope of removing the initialization of this
5259 biv entirely. */
5261 /* If final_value != 0, then the biv may be used after loop end
5262 and we must emit an insn to set it just in case.
5264 Reversed bivs already have an insn after the loop setting their
5265 value, so we don't need another one. We can't calculate the
5266 proper final value for such a biv here anyways. */
5267 if (bl->final_value && ! bl->reversed)
5268 loop_insn_sink_or_swim (loop,
5269 gen_load_of_final_value (bl->biv->dest_reg,
5270 bl->final_value));
5272 if (loop_dump_stream)
5273 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5274 bl->regno);
5276 /* See above note wrt final_value. But since we couldn't eliminate
5277 the biv, we must set the value after the loop instead of before. */
5278 else if (bl->final_value && ! bl->reversed)
5279 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5280 bl->final_value));
5283 /* Go through all the instructions in the loop, making all the
5284 register substitutions scheduled in REG_MAP. */
5286 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5287 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5288 || GET_CODE (p) == CALL_INSN)
5290 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5291 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5292 INSN_CODE (p) = -1;
5295 if (loop_info->n_iterations > 0)
5297 /* When we completely unroll a loop we will likely not need the increment
5298 of the loop BIV and we will not need the conditional branch at the
5299 end of the loop. */
5300 unrolled_insn_copies = insn_count - 2;
5302 #ifdef HAVE_cc0
5303 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5304 need the comparison before the conditional branch at the end of the
5305 loop. */
5306 unrolled_insn_copies -= 1;
5307 #endif
5309 /* We'll need one copy for each loop iteration. */
5310 unrolled_insn_copies *= loop_info->n_iterations;
5312 /* A little slop to account for the ability to remove initialization
5313 code, better CSE, and other secondary benefits of completely
5314 unrolling some loops. */
5315 unrolled_insn_copies -= 1;
5317 /* Clamp the value. */
5318 if (unrolled_insn_copies < 0)
5319 unrolled_insn_copies = 0;
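/* Worked example (sizes purely illustrative): a 12-insn loop body known
to run 4 iterations gives (12 - 2) * 4 - 1 = 39 copies (one fewer per
iteration on HAVE_cc0 targets), so the auto-unroll test below would
reject it since 39 > 12. */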
5322 /* Unroll loops from within strength reduction so that we can use the
5323 induction variable information that strength_reduce has already
5324 collected. Always unroll loops that would be as small or smaller
5325 unrolled than when rolled. */
5326 if ((flags & LOOP_UNROLL)
5327 || ((flags & LOOP_AUTO_UNROLL)
5328 && loop_info->n_iterations > 0
5329 && unrolled_insn_copies <= insn_count))
5330 unroll_loop (loop, insn_count, 1);
5332 #ifdef HAVE_doloop_end
5333 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5334 doloop_optimize (loop);
5335 #endif /* HAVE_doloop_end */
5337 /* If the number of iterations is known, put a branch prediction note
5338 on the branch. Do that only in the second loop pass, as loop unrolling
5339 may change the number of iterations performed. */
5340 if (flags & LOOP_BCT)
5342 unsigned HOST_WIDE_INT n
5343 = loop_info->n_iterations / loop_info->unroll_number;
5344 if (n > 1)
5345 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
5346 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
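/* E.g., if N is 4 the loop-back branch is predicted taken with
probability REG_BR_PROB_BASE - REG_BR_PROB_BASE / 4, i.e. three
iterations out of four continue the loop. */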
5349 if (loop_dump_stream)
5350 fprintf (loop_dump_stream, "\n");
5352 loop_ivs_free (loop);
5353 if (reg_map)
5354 free (reg_map);
5357 /* Record all basic induction variables calculated in the insn. */
5358 static rtx
5359 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5360 int maybe_multiple)
5362 struct loop_ivs *ivs = LOOP_IVS (loop);
5363 rtx set;
5364 rtx dest_reg;
5365 rtx inc_val;
5366 rtx mult_val;
5367 rtx *location;
5369 if (GET_CODE (p) == INSN
5370 && (set = single_set (p))
5371 && GET_CODE (SET_DEST (set)) == REG)
5373 dest_reg = SET_DEST (set);
5374 if (REGNO (dest_reg) < max_reg_before_loop
5375 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5376 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5378 if (basic_induction_var (loop, SET_SRC (set),
5379 GET_MODE (SET_SRC (set)),
5380 dest_reg, p, &inc_val, &mult_val,
5381 &location))
5383 /* It is a possible basic induction variable.
5384 Create and initialize an induction structure for it. */
5386 struct induction *v = xmalloc (sizeof (struct induction));
5388 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5389 not_every_iteration, maybe_multiple);
5390 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5392 else if (REGNO (dest_reg) < ivs->n_regs)
5393 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5396 return p;
5399 /* Record all givs calculated in the insn.
5400 A register is a giv if: it is only set once, it is a function of a
5401 biv and a constant (or invariant), and it is not a biv. */
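/* For instance (source purely illustrative): in

for (i = 0; i < n; i++)
q = p + i * 4;

the register holding `i' is a biv, while the register holding
`p + i * 4' is a giv: set once per iteration and a linear function
of the biv. */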
5402 static rtx
5403 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5404 int maybe_multiple)
5406 struct loop_regs *regs = LOOP_REGS (loop);
5408 rtx set;
5409 /* Look for a general induction variable in a register. */
5410 if (GET_CODE (p) == INSN
5411 && (set = single_set (p))
5412 && GET_CODE (SET_DEST (set)) == REG
5413 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5415 rtx src_reg;
5416 rtx dest_reg;
5417 rtx add_val;
5418 rtx mult_val;
5419 rtx ext_val;
5420 int benefit;
5421 rtx regnote = 0;
5422 rtx last_consec_insn;
5424 dest_reg = SET_DEST (set);
5425 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5426 return p;
5428 if (/* SET_SRC is a giv. */
5429 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5430 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5431 /* Equivalent expression is a giv. */
5432 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5433 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5434 &add_val, &mult_val, &ext_val, 0,
5435 &benefit, VOIDmode)))
5436 /* Don't try to handle any regs made by loop optimization.
5437 We have nothing on them in regno_first_uid, etc. */
5438 && REGNO (dest_reg) < max_reg_before_loop
5439 /* Don't recognize a BASIC_INDUCT_VAR here. */
5440 && dest_reg != src_reg
5441 /* This must be the only place where the register is set. */
5442 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5443 /* or all sets must be consecutive and make a giv. */
5444 || (benefit = consec_sets_giv (loop, benefit, p,
5445 src_reg, dest_reg,
5446 &add_val, &mult_val, &ext_val,
5447 &last_consec_insn))))
5449 struct induction *v = xmalloc (sizeof (struct induction));
5451 /* If this is a library call, increase benefit. */
5452 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5453 benefit += libcall_benefit (p);
5455 /* Skip the consecutive insns, if there are any. */
5456 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5457 p = last_consec_insn;
5459 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5460 ext_val, benefit, DEST_REG, not_every_iteration,
5461 maybe_multiple, (rtx*) 0);
5466 /* Look for givs which are memory addresses. */
5467 if (GET_CODE (p) == INSN)
5468 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5469 maybe_multiple);
5471 /* Update the status of whether giv can derive other givs. This can
5472 change when we pass a label or an insn that updates a biv. */
5473 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5474 || GET_CODE (p) == CODE_LABEL)
5475 update_giv_derive (loop, p);
5476 return p;
5479 /* Return 1 if X is a valid source for an initial value (or as value being
5480 compared against in an initial test).
5482 X must be either a register or constant and must not be clobbered between
5483 the current insn and the start of the loop.
5485 INSN is the insn containing X. */
5487 static int
5488 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
5490 if (CONSTANT_P (x))
5491 return 1;
5493 /* Only consider pseudos we know about initialized in insns whose luids
5494 we know. */
5495 if (GET_CODE (x) != REG
5496 || REGNO (x) >= max_reg_before_loop)
5497 return 0;
5499 /* Don't use a call-clobbered register across a call which clobbers it. On
5500 some machines, don't use any hard registers at all. */
5501 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5502 && (SMALL_REGISTER_CLASSES
5503 || (call_used_regs[REGNO (x)] && call_seen)))
5504 return 0;
5506 /* Don't use registers that have been clobbered before the start of the
5507 loop. */
5508 if (reg_set_between_p (x, insn, loop_start))
5509 return 0;
5511 return 1;
5514 /* Scan X for memory refs and check each memory address
5515 as a possible giv. INSN is the insn whose pattern X comes from.
5516 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5517 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5518 more than once in each loop iteration. */
5520 static void
5521 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5522 int not_every_iteration, int maybe_multiple)
5524 int i, j;
5525 enum rtx_code code;
5526 const char *fmt;
5528 if (x == 0)
5529 return;
5531 code = GET_CODE (x);
5532 switch (code)
5534 case REG:
5535 case CONST_INT:
5536 case CONST:
5537 case CONST_DOUBLE:
5538 case SYMBOL_REF:
5539 case LABEL_REF:
5540 case PC:
5541 case CC0:
5542 case ADDR_VEC:
5543 case ADDR_DIFF_VEC:
5544 case USE:
5545 case CLOBBER:
5546 return;
5548 case MEM:
5550 rtx src_reg;
5551 rtx add_val;
5552 rtx mult_val;
5553 rtx ext_val;
5554 int benefit;
5556 /* This code used to disable creating GIVs with mult_val == 1 and
5557 add_val == 0. However, this leads to lost optimizations when
5558 it comes time to combine a set of related DEST_ADDR GIVs, since
5559 this one would not be seen. */
5561 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5562 &mult_val, &ext_val, 1, &benefit,
5563 GET_MODE (x)))
5565 /* Found one; record it. */
5566 struct induction *v = xmalloc (sizeof (struct induction));
5568 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5569 add_val, ext_val, benefit, DEST_ADDR,
5570 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5572 v->mem = x;
5575 return;
5577 default:
5578 break;
5581 /* Recursively scan the subexpressions for other mem refs. */
5583 fmt = GET_RTX_FORMAT (code);
5584 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5585 if (fmt[i] == 'e')
5586 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5587 maybe_multiple);
5588 else if (fmt[i] == 'E')
5589 for (j = 0; j < XVECLEN (x, i); j++)
5590 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5591 maybe_multiple);
5594 /* Fill in the data about one biv update.
5595 V is the `struct induction' in which we record the biv. (It is
5596 allocated by the caller.)
5597 INSN is the insn that sets it.
5598 DEST_REG is the biv's reg.
5600 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5601 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5602 being set to INC_VAL.
5604 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5605 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5606 can be executed more than once per iteration. If MAYBE_MULTIPLE
5607 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5608 executed exactly once per iteration. */
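/* For illustration only (hypothetical source, not from this file): the two
   biv updates

     i = i + 4;
     i = 10;

   are recorded with MULT_VAL == const1_rtx, INC_VAL == (const_int 4) for the
   first, and with MULT_VAL == const0_rtx, INC_VAL == (const_int 10) for the
   second.  */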
5610 static void
5611 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5612 rtx inc_val, rtx mult_val, rtx *location,
5613 int not_every_iteration, int maybe_multiple)
5615 struct loop_ivs *ivs = LOOP_IVS (loop);
5616 struct iv_class *bl;
5618 v->insn = insn;
5619 v->src_reg = dest_reg;
5620 v->dest_reg = dest_reg;
5621 v->mult_val = mult_val;
5622 v->add_val = inc_val;
5623 v->ext_dependent = NULL_RTX;
5624 v->location = location;
5625 v->mode = GET_MODE (dest_reg);
5626 v->always_computable = ! not_every_iteration;
5627 v->always_executed = ! not_every_iteration;
5628 v->maybe_multiple = maybe_multiple;
5629 v->same = 0;
5631 /* Add this to the reg's iv_class, creating a class
5632 if this is the first incrementation of the reg. */
5634 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5635 if (bl == 0)
5637 /* Create and initialize new iv_class. */
5639 bl = xmalloc (sizeof (struct iv_class));
5641 bl->regno = REGNO (dest_reg);
5642 bl->biv = 0;
5643 bl->giv = 0;
5644 bl->biv_count = 0;
5645 bl->giv_count = 0;
5647 /* Set initial value to the reg itself. */
5648 bl->initial_value = dest_reg;
5649 bl->final_value = 0;
5650 /* We haven't seen the initializing insn yet.  */
5651 bl->init_insn = 0;
5652 bl->init_set = 0;
5653 bl->initial_test = 0;
5654 bl->incremented = 0;
5655 bl->eliminable = 0;
5656 bl->nonneg = 0;
5657 bl->reversed = 0;
5658 bl->total_benefit = 0;
5660 /* Add this class to ivs->list. */
5661 bl->next = ivs->list;
5662 ivs->list = bl;
5664 /* Put it in the array of biv register classes. */
5665 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5667 else
5669 /* Check if location is the same as a previous one. */
5670 struct induction *induction;
5671 for (induction = bl->biv; induction; induction = induction->next_iv)
5672 if (location == induction->location)
5674 v->same = induction;
5675 break;
5679 /* Update IV_CLASS entry for this biv. */
5680 v->next_iv = bl->biv;
5681 bl->biv = v;
5682 bl->biv_count++;
5683 if (mult_val == const1_rtx)
5684 bl->incremented = 1;
5686 if (loop_dump_stream)
5687 loop_biv_dump (v, loop_dump_stream, 0);
5690 /* Fill in the data about one giv.
5691 V is the `struct induction' in which we record the giv. (It is
5692 allocated by the caller.)
5693 INSN is the insn that sets it.
5694 BENEFIT estimates the savings from deleting this insn.
5695 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5696 into a register or is used as a memory address.
5698 SRC_REG is the biv reg which the giv is computed from.
5699 DEST_REG is the giv's reg (if the giv is stored in a reg).
5700 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5701 LOCATION points to the place where this giv's value appears in INSN. */
5703 static void
5704 record_giv (const struct loop *loop, struct induction *v, rtx insn,
5705 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5706 rtx ext_val, int benefit, enum g_types type,
5707 int not_every_iteration, int maybe_multiple, rtx *location)
5709 struct loop_ivs *ivs = LOOP_IVS (loop);
5710 struct induction *b;
5711 struct iv_class *bl;
5712 rtx set = single_set (insn);
5713 rtx temp;
5715 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5716 undo the MULT canonicalization that we performed earlier. */
5717 temp = simplify_rtx (add_val);
5718 if (temp
5719 && ! (GET_CODE (add_val) == MULT
5720 && GET_CODE (temp) == ASHIFT))
5721 add_val = temp;
5723 v->insn = insn;
5724 v->src_reg = src_reg;
5725 v->giv_type = type;
5726 v->dest_reg = dest_reg;
5727 v->mult_val = mult_val;
5728 v->add_val = add_val;
5729 v->ext_dependent = ext_val;
5730 v->benefit = benefit;
5731 v->location = location;
5732 v->cant_derive = 0;
5733 v->combined_with = 0;
5734 v->maybe_multiple = maybe_multiple;
5735 v->maybe_dead = 0;
5736 v->derive_adjustment = 0;
5737 v->same = 0;
5738 v->ignore = 0;
5739 v->new_reg = 0;
5740 v->final_value = 0;
5741 v->same_insn = 0;
5742 v->auto_inc_opt = 0;
5743 v->unrolled = 0;
5744 v->shared = 0;
5746 /* The v->always_computable field is used in update_giv_derive, to
5747 determine whether a giv can be used to derive another giv. For a
5748 DEST_REG giv, INSN computes a new value for the giv, so its value
5749 isn't computable if INSN isn't executed every iteration.
5750 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5751 it does not compute a new value. Hence the value is always computable
5752 regardless of whether INSN is executed each iteration. */
5754 if (type == DEST_ADDR)
5755 v->always_computable = 1;
5756 else
5757 v->always_computable = ! not_every_iteration;
5759 v->always_executed = ! not_every_iteration;
5761 if (type == DEST_ADDR)
5763 v->mode = GET_MODE (*location);
5764 v->lifetime = 1;
5766 else /* type == DEST_REG */
5768 v->mode = GET_MODE (SET_DEST (set));
5770 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5772 /* If the lifetime is zero, it means that this register is
5773 really a dead store. So mark this as a giv that can be
5774 ignored. This will not prevent the biv from being eliminated. */
5775 if (v->lifetime == 0)
5776 v->ignore = 1;
5778 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5779 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5782 /* Add the giv to the class of givs computed from one biv. */
5784 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5785 if (bl)
5787 v->next_iv = bl->giv;
5788 bl->giv = v;
5789 /* Don't count DEST_ADDR. This is supposed to count the number of
5790 insns that calculate givs. */
5791 if (type == DEST_REG)
5792 bl->giv_count++;
5793 bl->total_benefit += benefit;
5795 else
5796 /* Fatal error, biv missing for this giv? */
5797 abort ();
5799 if (type == DEST_ADDR)
5801 v->replaceable = 1;
5802 v->not_replaceable = 0;
5804 else
5806 /* The giv can be replaced outright by the reduced register only if all
5807 of the following conditions are true:
5808 - the insn that sets the giv is always executed on any iteration
5809 on which the giv is used at all
5810 (there are two ways to deduce this:
5811 either the insn is executed on every iteration,
5812 or all uses follow that insn in the same basic block),
5813 - the giv is not used outside the loop
5814 - no assignments to the biv occur during the giv's lifetime. */
5816 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5817 /* Previous line always fails if INSN was moved by loop opt. */
5818 && REGNO_LAST_LUID (REGNO (dest_reg))
5819 < INSN_LUID (loop->end)
5820 && (! not_every_iteration
5821 || last_use_this_basic_block (dest_reg, insn)))
5823 /* Now check that there are no assignments to the biv within the
5824 giv's lifetime. This requires two separate checks. */
5826 /* Check each biv update, and fail if any are between the first
5827 and last use of the giv.
5829 If this loop contains an inner loop that was unrolled, then
5830 the insn modifying the biv may have been emitted by the loop
5831 unrolling code, and hence does not have a valid luid. Just
5832 mark the biv as not replaceable in this case. It is not very
5833 useful as a biv, because it is used in two different loops.
5834 It is very unlikely that we would be able to optimize the giv
5835 using this biv anyways. */
5837 v->replaceable = 1;
5838 v->not_replaceable = 0;
5839 for (b = bl->biv; b; b = b->next_iv)
5841 if (INSN_UID (b->insn) >= max_uid_for_loop
5842 || ((INSN_LUID (b->insn)
5843 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5844 && (INSN_LUID (b->insn)
5845 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5847 v->replaceable = 0;
5848 v->not_replaceable = 1;
5849 break;
5853 /* If there are any backwards branches that go from after the
5854 biv update to before it, then this giv is not replaceable. */
5855 if (v->replaceable)
5856 for (b = bl->biv; b; b = b->next_iv)
5857 if (back_branch_in_range_p (loop, b->insn))
5859 v->replaceable = 0;
5860 v->not_replaceable = 1;
5861 break;
5864 else
5866 /* May still be replaceable, we don't have enough info here to
5867 decide. */
5868 v->replaceable = 0;
5869 v->not_replaceable = 0;
5873 /* Record whether the add_val contains a const_int, for later use by
5874 combine_givs. */
5876 rtx tem = add_val;
5878 v->no_const_addval = 1;
5879 if (tem == const0_rtx)
5881 else if (CONSTANT_P (add_val))
5882 v->no_const_addval = 0;
5883 if (GET_CODE (tem) == PLUS)
5885 while (1)
5887 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5888 tem = XEXP (tem, 0);
5889 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5890 tem = XEXP (tem, 1);
5891 else
5892 break;
5894 if (CONSTANT_P (XEXP (tem, 1)))
5895 v->no_const_addval = 0;
5899 if (loop_dump_stream)
5900 loop_giv_dump (v, loop_dump_stream, 0);
5903 /* All this does is determine whether a giv can be made replaceable because
5904 its final value can be calculated. This code can not be part of record_giv
5905 above, because final_giv_value requires that the number of loop iterations
5906 be known, and that can not be accurately calculated until after all givs
5907 have been identified. */
5909 static void
5910 check_final_value (const struct loop *loop, struct induction *v)
5912 rtx final_value = 0;
5914 /* DEST_ADDR givs will never reach here, because they are always marked
5915 replaceable above in record_giv. */
5917 /* The giv can be replaced outright by the reduced register only if all
5918 of the following conditions are true:
5919 - the insn that sets the giv is always executed on any iteration
5920 on which the giv is used at all
5921 (there are two ways to deduce this:
5922 either the insn is executed on every iteration,
5923 or all uses follow that insn in the same basic block),
5924 - its final value can be calculated (this condition is different
5925 than the one above in record_giv)
5926 - it's not used before it's set
5927 - no assignments to the biv occur during the giv's lifetime. */
5929 #if 0
5930 /* This is only called now when replaceable is known to be false. */
5931 /* Clear replaceable, so that it won't confuse final_giv_value. */
5932 v->replaceable = 0;
5933 #endif
5935 if ((final_value = final_giv_value (loop, v))
5936 && (v->always_executed
5937 || last_use_this_basic_block (v->dest_reg, v->insn)))
5939 int biv_increment_seen = 0, before_giv_insn = 0;
5940 rtx p = v->insn;
5941 rtx last_giv_use;
5943 v->replaceable = 1;
5944 v->not_replaceable = 0;
5946 /* When trying to determine whether or not a biv increment occurs
5947 during the lifetime of the giv, we can ignore uses of the variable
5948 outside the loop because final_value is true. Hence we can not
5949 use regno_last_uid and regno_first_uid as above in record_giv. */
5951 /* Search the loop to determine whether any assignments to the
5952 biv occur during the giv's lifetime. Start with the insn
5953 that sets the giv, and search around the loop until we come
5954 back to that insn again.
5956 Also fail if there is a jump within the giv's lifetime that jumps
5957 to somewhere outside the lifetime but still within the loop. This
5958 catches spaghetti code where the execution order is not linear, and
5959 hence the above test fails. Here we assume that the giv lifetime
5960 does not extend from one iteration of the loop to the next, so as
5961 to make the test easier. Since the lifetime isn't known yet,
5962 this requires two loops. See also record_giv above. */
5964 last_giv_use = v->insn;
5966 while (1)
5968 p = NEXT_INSN (p);
5969 if (p == loop->end)
5971 before_giv_insn = 1;
5972 p = NEXT_INSN (loop->start);
5974 if (p == v->insn)
5975 break;
5977 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5978 || GET_CODE (p) == CALL_INSN)
5980 /* It is possible for the BIV increment to use the GIV if we
5981 have a cycle. Thus we must be sure to check each insn for
5982 both BIV and GIV uses, and we must check for BIV uses
5983 first. */
5985 if (! biv_increment_seen
5986 && reg_set_p (v->src_reg, PATTERN (p)))
5987 biv_increment_seen = 1;
5989 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5991 if (biv_increment_seen || before_giv_insn)
5993 v->replaceable = 0;
5994 v->not_replaceable = 1;
5995 break;
5997 last_giv_use = p;
6002 /* Now that the lifetime of the giv is known, check for branches
6003 from within the lifetime to outside the lifetime if it is still
6004 replaceable. */
6006 if (v->replaceable)
6008 p = v->insn;
6009 while (1)
6011 p = NEXT_INSN (p);
6012 if (p == loop->end)
6013 p = NEXT_INSN (loop->start);
6014 if (p == last_giv_use)
6015 break;
6017 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6018 && LABEL_NAME (JUMP_LABEL (p))
6019 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6020 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6021 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6022 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6024 v->replaceable = 0;
6025 v->not_replaceable = 1;
6027 if (loop_dump_stream)
6028 fprintf (loop_dump_stream,
6029 "Found branch outside giv lifetime.\n");
6031 break;
6036 /* If it is replaceable, then save the final value. */
6037 if (v->replaceable)
6038 v->final_value = final_value;
6041 if (loop_dump_stream && v->replaceable)
6042 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6043 INSN_UID (v->insn), REGNO (v->dest_reg));
6046 /* Update the status of whether a giv can derive other givs.
6048 We need to do something special if there is or may be an update to the biv
6049 between the time the giv is defined and the time it is used to derive
6050 another giv.
6052 In addition, a giv that is only conditionally set is not allowed to
6053 derive another giv once a label has been passed.
6055 The cases we look at are when a label or an update to a biv is passed. */
6057 static void
6058 update_giv_derive (const struct loop *loop, rtx p)
6060 struct loop_ivs *ivs = LOOP_IVS (loop);
6061 struct iv_class *bl;
6062 struct induction *biv, *giv;
6063 rtx tem;
6064 int dummy;
6066 /* Search all IV classes, then all bivs, and finally all givs.
6068 There are three cases we are concerned with. First we have the situation
6069 of a giv that is only updated conditionally. In that case, it may not
6070 derive any givs after a label is passed.
6072 The second case is when a biv update occurs, or may occur, after the
6073 definition of a giv. For certain biv updates (see below) that are
6074 known to occur between the giv definition and use, we can adjust the
6075 giv definition. For others, or when the biv update is conditional,
6076 we must prevent the giv from deriving any other givs. There are two
6077 sub-cases within this case.
6079 If this is a label, we are concerned with any biv update that is done
6080 conditionally, since it may be done after the giv is defined followed by
6081 a branch here (actually, we need to pass both a jump and a label, but
6082 this extra tracking doesn't seem worth it).
6084 If this is a jump, we are concerned about any biv update that may be
6085 executed multiple times. We are actually only concerned about
6086 backward jumps, but it is probably not worth performing the test
6087 on the jump again here.
6089 If this is a biv update, we must adjust the giv status to show that a
6090 subsequent biv update was performed. If this adjustment cannot be done,
6091 the giv cannot derive further givs. */
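/* A (hypothetical) illustration of the compensation: if a giv g = 4*i + 10
   has been recorded and the biv update i = i + 2 is then passed, g's register
   still holds a value based on the old i, so biv->add_val * giv->mult_val
   == 2 * 4 == 8 is accumulated in derive_adjustment and later subtracted
   when g is expanded in simplify_giv_expr.  */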
6093 for (bl = ivs->list; bl; bl = bl->next)
6094 for (biv = bl->biv; biv; biv = biv->next_iv)
6095 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6096 || biv->insn == p)
6098 for (giv = bl->giv; giv; giv = giv->next_iv)
6100 /* If cant_derive is already true, there is no point in
6101 checking all of these conditions again. */
6102 if (giv->cant_derive)
6103 continue;
6105 /* If this giv is conditionally set and we have passed a label,
6106 it cannot derive anything. */
6107 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6108 giv->cant_derive = 1;
6110 /* Skip givs that have mult_val == 0, since
6111 they are really invariants. Also skip those that are
6112 replaceable, since we know their lifetime doesn't contain
6113 any biv update. */
6114 else if (giv->mult_val == const0_rtx || giv->replaceable)
6115 continue;
6117 /* The only way we can allow this giv to derive another
6118 is if this is a biv increment and we can form the product
6119 of biv->add_val and giv->mult_val. In this case, we will
6120 be able to compute a compensation. */
6121 else if (biv->insn == p)
6123 rtx ext_val_dummy;
6125 tem = 0;
6126 if (biv->mult_val == const1_rtx)
6127 tem = simplify_giv_expr (loop,
6128 gen_rtx_MULT (giv->mode,
6129 biv->add_val,
6130 giv->mult_val),
6131 &ext_val_dummy, &dummy);
6133 if (tem && giv->derive_adjustment)
6134 tem = simplify_giv_expr
6135 (loop,
6136 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6137 &ext_val_dummy, &dummy);
6139 if (tem)
6140 giv->derive_adjustment = tem;
6141 else
6142 giv->cant_derive = 1;
6144 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6145 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6146 giv->cant_derive = 1;
6151 /* Check whether an insn is an increment legitimate for a basic induction var.
6152 X is the source of insn P, or a part of it.
6153 MODE is the mode in which X should be interpreted.
6155 DEST_REG is the putative biv, also the destination of the insn.
6156 We accept patterns of these forms:
6157 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6158 REG = INVARIANT + REG
6160 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6161 store the additive term into *INC_VAL, and store the place where
6162 we found the additive term into *LOCATION.
6164 If X is an assignment of an invariant into DEST_REG, we set
6165 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6167 We also want to detect a BIV when it corresponds to a variable
6168 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6169 of the variable may be a PLUS that adds a SUBREG of that variable to
6170 an invariant and then sign- or zero-extends the result of the PLUS
6171 into the variable.
6173 Most GIVs in such cases will be in the promoted mode, since that is
6174 probably the natural computation mode (and almost certainly the mode
6175 used for addresses) on the machine. So we view the pseudo-reg containing
6176 the variable as the BIV, as if it were simply incremented.
6178 Note that treating the entire pseudo as a BIV will result in making
6179 simple increments to any GIVs based on it. However, if the variable
6180 overflows in its declared mode but not its promoted mode, the result will
6181 be incorrect. This is acceptable if the variable is signed, since
6182 overflows in such cases are undefined, but not if it is unsigned, since
6183 those overflows are defined. So we only check for SIGN_EXTEND and
6184 not ZERO_EXTEND.
6186 If we cannot find a biv, we return 0. */
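/* A minimal sketch (hypothetical source, not from this file): for

     for (i = 0; i < n; i += 3)
       ...

   the increment insn is roughly (set (reg i) (plus (reg i) (const_int 3))),
   which matches the REG = REG + INVARIANT form above, so we return 1 with
   *MULT_VAL == const1_rtx, *INC_VAL == (const_int 3), and *LOCATION pointing
   at the addend inside the PLUS.  */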
6188 static int
6189 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6190 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6191 rtx **location)
6193 enum rtx_code code;
6194 rtx *argp, arg;
6195 rtx insn, set = 0, last, inc;
6197 code = GET_CODE (x);
6198 *location = NULL;
6199 switch (code)
6201 case PLUS:
6202 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6203 || (GET_CODE (XEXP (x, 0)) == SUBREG
6204 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6205 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6207 argp = &XEXP (x, 1);
6209 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6210 || (GET_CODE (XEXP (x, 1)) == SUBREG
6211 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6212 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6214 argp = &XEXP (x, 0);
6216 else
6217 return 0;
6219 arg = *argp;
6220 if (loop_invariant_p (loop, arg) != 1)
6221 return 0;
6223 /* convert_modes can emit new instructions, e.g. when arg is a loop
6224 invariant MEM and dest_reg has a different mode.
6225 These instructions would be emitted after the end of the function
6226 and then *inc_val would be an uninitialized pseudo.
6227 Detect this and bail in this case.
6228 Other alternatives to solve this can be introducing a convert_modes
6229 variant which is allowed to fail but not allowed to emit new
6230 instructions, emit these instructions before loop start and let
6231 it be garbage collected if *inc_val is never used or saving the
6232 *inc_val initialization sequence generated here and when *inc_val
6233 is going to be actually used, emit it at some suitable place. */
6234 last = get_last_insn ();
6235 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6236 if (get_last_insn () != last)
6238 delete_insns_since (last);
6239 return 0;
6242 *inc_val = inc;
6243 *mult_val = const1_rtx;
6244 *location = argp;
6245 return 1;
6247 case SUBREG:
6248 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6249 handle addition of promoted variables.
6250 ??? The comment at the start of this function is wrong: promoted
6251 variable increments don't look like it says they do. */
6252 return basic_induction_var (loop, SUBREG_REG (x),
6253 GET_MODE (SUBREG_REG (x)),
6254 dest_reg, p, inc_val, mult_val, location);
6256 case REG:
6257 /* If this register is assigned in a previous insn, look at its
6258 source, but don't go outside the loop or past a label. */
6260 /* If this sets a register to itself, we would repeat any previous
6261 biv increment if we applied this strategy blindly. */
6262 if (rtx_equal_p (dest_reg, x))
6263 return 0;
6265 insn = p;
6266 while (1)
6268 rtx dest;
6271 insn = PREV_INSN (insn);
6273 while (insn && GET_CODE (insn) == NOTE
6274 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6276 if (!insn)
6277 break;
6278 set = single_set (insn);
6279 if (set == 0)
6280 break;
6281 dest = SET_DEST (set);
6282 if (dest == x
6283 || (GET_CODE (dest) == SUBREG
6284 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6285 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6286 && SUBREG_REG (dest) == x))
6287 return basic_induction_var (loop, SET_SRC (set),
6288 (GET_MODE (SET_SRC (set)) == VOIDmode
6289 ? GET_MODE (x)
6290 : GET_MODE (SET_SRC (set))),
6291 dest_reg, insn,
6292 inc_val, mult_val, location);
6294 while (GET_CODE (dest) == SIGN_EXTRACT
6295 || GET_CODE (dest) == ZERO_EXTRACT
6296 || GET_CODE (dest) == SUBREG
6297 || GET_CODE (dest) == STRICT_LOW_PART)
6298 dest = XEXP (dest, 0);
6299 if (dest == x)
6300 break;
6302 /* Fall through. */
6304 /* Can accept constant setting of biv only when inside innermost loop.
6305 Otherwise, a biv of an inner loop may be incorrectly recognized
6306 as a biv of the outer loop,
6307 causing code to be moved INTO the inner loop. */
6308 case MEM:
6309 if (loop_invariant_p (loop, x) != 1)
6310 return 0;
6311 case CONST_INT:
6312 case SYMBOL_REF:
6313 case CONST:
6314 /* convert_modes aborts if we try to convert to or from CCmode, so just
6315 exclude that case. It is very unlikely that a condition code value
6316 would be a useful iterator anyways. convert_modes aborts if we try to
6317 convert a float mode to non-float or vice versa too. */
6318 if (loop->level == 1
6319 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6320 && GET_MODE_CLASS (mode) != MODE_CC)
6322 /* Possible bug here? Perhaps we don't know the mode of X. */
6323 last = get_last_insn ();
6324 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6325 if (get_last_insn () != last)
6327 delete_insns_since (last);
6328 return 0;
6331 *inc_val = inc;
6332 *mult_val = const0_rtx;
6333 return 1;
6335 else
6336 return 0;
6338 case SIGN_EXTEND:
6339 /* Ignore this BIV if signed arithmetic overflow is defined. */
6340 if (flag_wrapv)
6341 return 0;
6342 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6343 dest_reg, p, inc_val, mult_val, location);
6345 case ASHIFTRT:
6346 /* Similar, since this can be a sign extension. */
6347 for (insn = PREV_INSN (p);
6348 (insn && GET_CODE (insn) == NOTE
6349 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6350 insn = PREV_INSN (insn))
6353 if (insn)
6354 set = single_set (insn);
6356 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6357 && set && SET_DEST (set) == XEXP (x, 0)
6358 && GET_CODE (XEXP (x, 1)) == CONST_INT
6359 && INTVAL (XEXP (x, 1)) >= 0
6360 && GET_CODE (SET_SRC (set)) == ASHIFT
6361 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6362 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6363 GET_MODE (XEXP (x, 0)),
6364 dest_reg, insn, inc_val, mult_val,
6365 location);
6366 return 0;
6368 default:
6369 return 0;
6373 /* A general induction variable (giv) is any quantity that is a linear
6374 function of a basic induction variable,
6375 i.e. giv = biv * mult_val + add_val.
6376 The coefficients can be any loop invariant quantity.
6377 A giv need not be computed directly from the biv;
6378 it can be computed by way of other givs. */
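/* For illustration only (hypothetical source, assuming 4-byte ints): in

     for (i = 0; i < n; i++)
       a[i] = 0;

   i is the biv, and the address of a[i], i.e. i * 4 + (address of a[0]),
   is a giv with mult_val == 4 and add_val == the loop-invariant address of
   a[0].  It is recorded as a DEST_ADDR giv by find_mem_givs above; an
   explicit assignment such as p = &a[i] would instead yield a DEST_REG
   giv.  */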
6380 /* Determine whether X computes a giv.
6381 If it does, return a nonzero value
6382 which is the benefit from eliminating the computation of X;
6383 set *SRC_REG to the register of the biv that it is computed from;
6384 set *ADD_VAL and *MULT_VAL to the coefficients,
6385 such that the value of X is biv * mult + add; */
6387 static int
6388 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6389 rtx *add_val, rtx *mult_val, rtx *ext_val,
6390 int is_addr, int *pbenefit,
6391 enum machine_mode addr_mode)
6393 struct loop_ivs *ivs = LOOP_IVS (loop);
6394 rtx orig_x = x;
6396 /* If this is an invariant, forget it, it isn't a giv. */
6397 if (loop_invariant_p (loop, x) == 1)
6398 return 0;
6400 *pbenefit = 0;
6401 *ext_val = NULL_RTX;
6402 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6403 if (x == 0)
6404 return 0;
6406 switch (GET_CODE (x))
6408 case USE:
6409 case CONST_INT:
6410 /* Since this is now an invariant and wasn't before, it must be a giv
6411 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6412 with. */
6413 *src_reg = ivs->list->biv->dest_reg;
6414 *mult_val = const0_rtx;
6415 *add_val = x;
6416 break;
6418 case REG:
6419 /* This is equivalent to a BIV. */
6420 *src_reg = x;
6421 *mult_val = const1_rtx;
6422 *add_val = const0_rtx;
6423 break;
6425 case PLUS:
6426 /* Either (plus (biv) (invar)) or
6427 (plus (mult (biv) (invar_1)) (invar_2)). */
6428 if (GET_CODE (XEXP (x, 0)) == MULT)
6430 *src_reg = XEXP (XEXP (x, 0), 0);
6431 *mult_val = XEXP (XEXP (x, 0), 1);
6433 else
6435 *src_reg = XEXP (x, 0);
6436 *mult_val = const1_rtx;
6438 *add_val = XEXP (x, 1);
6439 break;
6441 case MULT:
6442 /* ADD_VAL is zero. */
6443 *src_reg = XEXP (x, 0);
6444 *mult_val = XEXP (x, 1);
6445 *add_val = const0_rtx;
6446 break;
6448 default:
6449 abort ();
6452 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6453 unless they are CONST_INT). */
6454 if (GET_CODE (*add_val) == USE)
6455 *add_val = XEXP (*add_val, 0);
6456 if (GET_CODE (*mult_val) == USE)
6457 *mult_val = XEXP (*mult_val, 0);
6459 if (is_addr)
6460 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6461 else
6462 *pbenefit += rtx_cost (orig_x, SET);
6464 /* Always return true if this is a giv so it will be detected as such,
6465 even if the benefit is zero or negative. This allows elimination
6466 of bivs that might otherwise not be eliminated. */
6467 return 1;
6470 /* Given an expression, X, try to form it as a linear function of a biv.
6471 We will canonicalize it to be of the form
6472 (plus (mult (BIV) (invar_1))
6473 (invar_2))
6474 with possible degeneracies.
6476 The invariant expressions must each be of a form that can be used as a
6477 machine operand. We surround them with a USE rtx (a hack, but localized
6478 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6479 routine; it is the caller's responsibility to strip them.
6481 If no such canonicalization is possible (i.e., two biv's are used or an
6482 expression that is neither invariant nor a biv or giv), this routine
6483 returns 0.
6485 For a nonzero return, the result will have a code of CONST_INT, USE,
6486 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6488 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
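/* Two small illustrative inputs (hypothetical, with i the biv):

     (ashift (reg i) (const_int 2))
       canonicalizes to (mult (reg i) (const_int 4)), and

     (mult (plus (reg i) (const_int 3)) (const_int 4))
       canonicalizes to (plus (mult (reg i) (const_int 4)) (const_int 12)),

   via the ASHIFT case and the distribution of MULT over PLUS below.  */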
6490 static rtx sge_plus (enum machine_mode, rtx, rtx);
6491 static rtx sge_plus_constant (rtx, rtx);
6493 static rtx
6494 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6496 struct loop_ivs *ivs = LOOP_IVS (loop);
6497 struct loop_regs *regs = LOOP_REGS (loop);
6498 enum machine_mode mode = GET_MODE (x);
6499 rtx arg0, arg1;
6500 rtx tem;
6502 /* If this is not an integer mode, or if we cannot do arithmetic in this
6503 mode, this can't be a giv. */
6504 if (mode != VOIDmode
6505 && (GET_MODE_CLASS (mode) != MODE_INT
6506 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6507 return NULL_RTX;
6509 switch (GET_CODE (x))
6511 case PLUS:
6512 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6513 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6514 if (arg0 == 0 || arg1 == 0)
6515 return NULL_RTX;
6517 /* Put constant last, CONST_INT last if both constant. */
6518 if ((GET_CODE (arg0) == USE
6519 || GET_CODE (arg0) == CONST_INT)
6520 && ! ((GET_CODE (arg0) == USE
6521 && GET_CODE (arg1) == USE)
6522 || GET_CODE (arg1) == CONST_INT))
6523 tem = arg0, arg0 = arg1, arg1 = tem;
6525 /* Handle addition of zero, then addition of an invariant. */
6526 if (arg1 == const0_rtx)
6527 return arg0;
6528 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6529 switch (GET_CODE (arg0))
6531 case CONST_INT:
6532 case USE:
6533 /* Adding two invariants must result in an invariant, so enclose
6534 addition operation inside a USE and return it. */
6535 if (GET_CODE (arg0) == USE)
6536 arg0 = XEXP (arg0, 0);
6537 if (GET_CODE (arg1) == USE)
6538 arg1 = XEXP (arg1, 0);
6540 if (GET_CODE (arg0) == CONST_INT)
6541 tem = arg0, arg0 = arg1, arg1 = tem;
6542 if (GET_CODE (arg1) == CONST_INT)
6543 tem = sge_plus_constant (arg0, arg1);
6544 else
6545 tem = sge_plus (mode, arg0, arg1);
6547 if (GET_CODE (tem) != CONST_INT)
6548 tem = gen_rtx_USE (mode, tem);
6549 return tem;
6551 case REG:
6552 case MULT:
6553 /* biv + invar or mult + invar. Return sum. */
6554 return gen_rtx_PLUS (mode, arg0, arg1);
6556 case PLUS:
6557 /* (a + invar_1) + invar_2. Associate. */
6558 return
6559 simplify_giv_expr (loop,
6560 gen_rtx_PLUS (mode,
6561 XEXP (arg0, 0),
6562 gen_rtx_PLUS (mode,
6563 XEXP (arg0, 1),
6564 arg1)),
6565 ext_val, benefit);
6567 default:
6568 abort ();
6571 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6572 MULT to reduce cases. */
6573 if (GET_CODE (arg0) == REG)
6574 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6575 if (GET_CODE (arg1) == REG)
6576 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6578 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6579 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6580 Recurse to associate the second PLUS. */
6581 if (GET_CODE (arg1) == MULT)
6582 tem = arg0, arg0 = arg1, arg1 = tem;
6584 if (GET_CODE (arg1) == PLUS)
6585 return
6586 simplify_giv_expr (loop,
6587 gen_rtx_PLUS (mode,
6588 gen_rtx_PLUS (mode, arg0,
6589 XEXP (arg1, 0)),
6590 XEXP (arg1, 1)),
6591 ext_val, benefit);
6593 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6594 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6595 return NULL_RTX;
6597 if (!rtx_equal_p (arg0, arg1))
6598 return NULL_RTX;
6600 return simplify_giv_expr (loop,
6601 gen_rtx_MULT (mode,
6602 XEXP (arg0, 0),
6603 gen_rtx_PLUS (mode,
6604 XEXP (arg0, 1),
6605 XEXP (arg1, 1))),
6606 ext_val, benefit);
6608 case MINUS:
6609 /* Handle "a - b" as "a + b * (-1)". */
6610 return simplify_giv_expr (loop,
6611 gen_rtx_PLUS (mode,
6612 XEXP (x, 0),
6613 gen_rtx_MULT (mode,
6614 XEXP (x, 1),
6615 constm1_rtx)),
6616 ext_val, benefit);
6618 case MULT:
6619 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6620 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6621 if (arg0 == 0 || arg1 == 0)
6622 return NULL_RTX;
6624 /* Put constant last, CONST_INT last if both constant. */
6625 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6626 && GET_CODE (arg1) != CONST_INT)
6627 tem = arg0, arg0 = arg1, arg1 = tem;
6629 /* If second argument is not now constant, not giv. */
6630 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6631 return NULL_RTX;
6633 /* Handle multiply by 0 or 1. */
6634 if (arg1 == const0_rtx)
6635 return const0_rtx;
6637 else if (arg1 == const1_rtx)
6638 return arg0;
6640 switch (GET_CODE (arg0))
6642 case REG:
6643 /* biv * invar. Done. */
6644 return gen_rtx_MULT (mode, arg0, arg1);
6646 case CONST_INT:
6647 /* Product of two constants. */
6648 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6650 case USE:
6651 /* invar * invar is a giv, but attempt to simplify it somehow. */
6652 if (GET_CODE (arg1) != CONST_INT)
6653 return NULL_RTX;
6655 arg0 = XEXP (arg0, 0);
6656 if (GET_CODE (arg0) == MULT)
6658 /* (invar_0 * invar_1) * invar_2. Associate. */
6659 return simplify_giv_expr (loop,
6660 gen_rtx_MULT (mode,
6661 XEXP (arg0, 0),
6662 gen_rtx_MULT (mode,
6663 XEXP (arg0,
6665 arg1)),
6666 ext_val, benefit);
6668 /* Propagate the MULT expressions to the innermost nodes. */
6669 else if (GET_CODE (arg0) == PLUS)
6671 /* (invar_0 + invar_1) * invar_2. Distribute. */
6672 return simplify_giv_expr (loop,
6673 gen_rtx_PLUS (mode,
6674 gen_rtx_MULT (mode,
6675 XEXP (arg0,
6677 arg1),
6678 gen_rtx_MULT (mode,
6679 XEXP (arg0,
6681 arg1)),
6682 ext_val, benefit);
6684 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6686 case MULT:
6687 /* (a * invar_1) * invar_2. Associate. */
6688 return simplify_giv_expr (loop,
6689 gen_rtx_MULT (mode,
6690 XEXP (arg0, 0),
6691 gen_rtx_MULT (mode,
6692 XEXP (arg0, 1),
6693 arg1)),
6694 ext_val, benefit);
6696 case PLUS:
6697 /* (a + invar_1) * invar_2. Distribute. */
6698 return simplify_giv_expr (loop,
6699 gen_rtx_PLUS (mode,
6700 gen_rtx_MULT (mode,
6701 XEXP (arg0, 0),
6702 arg1),
6703 gen_rtx_MULT (mode,
6704 XEXP (arg0, 1),
6705 arg1)),
6706 ext_val, benefit);
6708 default:
6709 abort ();
6712 case ASHIFT:
6713 /* Shift by constant is multiply by power of two. */
6714 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6715 return 0;
6717 return
6718 simplify_giv_expr (loop,
6719 gen_rtx_MULT (mode,
6720 XEXP (x, 0),
6721 GEN_INT ((HOST_WIDE_INT) 1
6722 << INTVAL (XEXP (x, 1)))),
6723 ext_val, benefit);
6725 case NEG:
6726 /* "-a" is "a * (-1)" */
6727 return simplify_giv_expr (loop,
6728 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6729 ext_val, benefit);
6731 case NOT:
6732 /* "~a" is "-a - 1". Silly, but easy. */
6733 return simplify_giv_expr (loop,
6734 gen_rtx_MINUS (mode,
6735 gen_rtx_NEG (mode, XEXP (x, 0)),
6736 const1_rtx),
6737 ext_val, benefit);
6739 case USE:
6740 /* Already in proper form for invariant. */
6741 return x;
6743 case SIGN_EXTEND:
6744 case ZERO_EXTEND:
6745 case TRUNCATE:
6746 /* Conditionally recognize extensions of simple IVs. After we've
6747 computed loop traversal counts and verified the range of the
6748 source IV, we'll reevaluate this as a GIV. */
6749 if (*ext_val == NULL_RTX)
6751 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6752 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6754 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6755 return arg0;
6758 goto do_default;
6760 case REG:
6761 /* If this is a new register, we can't deal with it. */
6762 if (REGNO (x) >= max_reg_before_loop)
6763 return 0;
6765 /* Check for biv or giv. */
6766 switch (REG_IV_TYPE (ivs, REGNO (x)))
6768 case BASIC_INDUCT:
6769 return x;
6770 case GENERAL_INDUCT:
6772 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6774 /* Form expression from giv and add benefit. Ensure this giv
6775 can derive another and subtract any needed adjustment if so. */
6777 /* Increasing the benefit here is risky. The only case in which it
6778 is arguably correct is if this is the only use of V. In other
6779 cases, this will artificially inflate the benefit of the current
6780 giv, and lead to suboptimal code. Thus, it is disabled, since
6781 potentially not reducing an only marginally beneficial giv is
6782 less harmful than reducing many givs that are not really
6783 beneficial. */
6785 rtx single_use = regs->array[REGNO (x)].single_usage;
6786 if (single_use && single_use != const0_rtx)
6787 *benefit += v->benefit;
6790 if (v->cant_derive)
6791 return 0;
6793 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6794 v->src_reg, v->mult_val),
6795 v->add_val);
6797 if (v->derive_adjustment)
6798 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6799 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6800 if (*ext_val)
6802 if (!v->ext_dependent)
6803 return arg0;
6805 else
6807 *ext_val = v->ext_dependent;
6808 return arg0;
6810 return 0;
6813 default:
6814 do_default:
6815 /* If it isn't an induction variable, and it is invariant, we
6816 may be able to simplify things further by looking through
6817 the bits we just moved outside the loop. */
6818 if (loop_invariant_p (loop, x) == 1)
6820 struct movable *m;
6821 struct loop_movables *movables = LOOP_MOVABLES (loop);
6823 for (m = movables->head; m; m = m->next)
6824 if (rtx_equal_p (x, m->set_dest))
6826 /* Ok, we found a match. Substitute and simplify. */
6828 /* If we match another movable, we must use that, as
6829 this one is going away. */
6830 if (m->match)
6831 return simplify_giv_expr (loop, m->match->set_dest,
6832 ext_val, benefit);
6834 /* If consec is nonzero, this is a member of a group of
6835 instructions that were moved together. We handle this
6836 case only to the point of seeking to the last insn and
6837 looking for a REG_EQUAL. Fail if we don't find one. */
6838 if (m->consec != 0)
6840 int i = m->consec;
6841 tem = m->insn;
6844 tem = NEXT_INSN (tem);
6846 while (--i > 0);
6848 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6849 if (tem)
6850 tem = XEXP (tem, 0);
6852 else
6854 tem = single_set (m->insn);
6855 if (tem)
6856 tem = SET_SRC (tem);
6859 if (tem)
6861 /* What we are most interested in is pointer
6862 arithmetic on invariants -- only take
6863 patterns we may be able to do something with. */
6864 if (GET_CODE (tem) == PLUS
6865 || GET_CODE (tem) == MULT
6866 || GET_CODE (tem) == ASHIFT
6867 || GET_CODE (tem) == CONST_INT
6868 || GET_CODE (tem) == SYMBOL_REF)
6870 tem = simplify_giv_expr (loop, tem, ext_val,
6871 benefit);
6872 if (tem)
6873 return tem;
6875 else if (GET_CODE (tem) == CONST
6876 && GET_CODE (XEXP (tem, 0)) == PLUS
6877 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6878 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6880 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6881 ext_val, benefit);
6882 if (tem)
6883 return tem;
6886 break;
6889 break;
6892 /* Fall through to general case. */
6893 default:
6894 /* If invariant, return as USE (unless CONST_INT).
6895 Otherwise, not giv. */
6896 if (GET_CODE (x) == USE)
6897 x = XEXP (x, 0);
6899 if (loop_invariant_p (loop, x) == 1)
6901 if (GET_CODE (x) == CONST_INT)
6902 return x;
6903 if (GET_CODE (x) == CONST
6904 && GET_CODE (XEXP (x, 0)) == PLUS
6905 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6906 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6907 x = XEXP (x, 0);
6908 return gen_rtx_USE (mode, x);
6910 else
6911 return 0;
6915 /* This routine folds invariants such that there is only ever one
6916 CONST_INT in the summation. It is only used by simplify_giv_expr. */
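/* For example (illustrative only):

     sge_plus_constant ((plus (symbol_ref X) (const_int 4)), (const_int 6))

   yields (plus (symbol_ref X) (const_int 10)), so the summation keeps a
   single CONST_INT.  */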
6918 static rtx
6919 sge_plus_constant (rtx x, rtx c)
6921 if (GET_CODE (x) == CONST_INT)
6922 return GEN_INT (INTVAL (x) + INTVAL (c));
6923 else if (GET_CODE (x) != PLUS)
6924 return gen_rtx_PLUS (GET_MODE (x), x, c);
6925 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6927 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6928 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6930 else if (GET_CODE (XEXP (x, 0)) == PLUS
6931 || GET_CODE (XEXP (x, 1)) != PLUS)
6933 return gen_rtx_PLUS (GET_MODE (x),
6934 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6936 else
6938 return gen_rtx_PLUS (GET_MODE (x),
6939 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6943 static rtx
6944 sge_plus (enum machine_mode mode, rtx x, rtx y)
6946 while (GET_CODE (y) == PLUS)
6948 rtx a = XEXP (y, 0);
6949 if (GET_CODE (a) == CONST_INT)
6950 x = sge_plus_constant (x, a);
6951 else
6952 x = gen_rtx_PLUS (mode, x, a);
6953 y = XEXP (y, 1);
6955 if (GET_CODE (y) == CONST_INT)
6956 x = sge_plus_constant (x, y);
6957 else
6958 x = gen_rtx_PLUS (mode, x, y);
6959 return x;
6962 /* Help detect a giv that is calculated by several consecutive insns;
6963 for example,
6964 giv = biv * M
6965 giv = giv + A
6966 The caller has already identified the first insn P as having a giv as dest;
6967 we check that all other insns that set the same register follow
6968 immediately after P, that they alter nothing else,
6969 and that the result of the last is still a giv.
6971 The value is 0 if the reg set in P is not really a giv.
6972 Otherwise, the value is the amount gained by eliminating
6973 all the consecutive insns that compute the value.
6975 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6976 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6978 The coefficients of the ultimate giv value are stored in
6979 *MULT_VAL and *ADD_VAL. */
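/* A concrete (hypothetical) instance of the pattern above: for the source
   statement t = i * 4 + 8 emitted as the two consecutive insns

     t = i * 4;
     t = t + 8;

   the second insn leaves t a giv of the biv i, so we succeed with
   *MULT_VAL == (const_int 4), *ADD_VAL == (const_int 8), and return the
   combined benefit of eliminating both insns.  */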
6981 static int
6982 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
6983 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
6984 rtx *ext_val, rtx *last_consec_insn)
6986 struct loop_ivs *ivs = LOOP_IVS (loop);
6987 struct loop_regs *regs = LOOP_REGS (loop);
6988 int count;
6989 enum rtx_code code;
6990 int benefit;
6991 rtx temp;
6992 rtx set;
6994 /* Indicate that this is a giv so that we can update the value produced in
6995 each insn of the multi-insn sequence.
6997 This induction structure will be used only by the call to
6998 general_induction_var below, so we can allocate it on our stack.
6999 If this is a giv, our caller will replace the induct var entry with
7000 a new induction structure. */
7001 struct induction *v;
7003 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7004 return 0;
7006 v = alloca (sizeof (struct induction));
7007 v->src_reg = src_reg;
7008 v->mult_val = *mult_val;
7009 v->add_val = *add_val;
7010 v->benefit = first_benefit;
7011 v->cant_derive = 0;
7012 v->derive_adjustment = 0;
7013 v->ext_dependent = NULL_RTX;
7015 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7016 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7018 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7020 while (count > 0)
7022 p = NEXT_INSN (p);
7023 code = GET_CODE (p);
7025 /* If libcall, skip to end of call sequence. */
7026 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7027 p = XEXP (temp, 0);
7029 if (code == INSN
7030 && (set = single_set (p))
7031 && GET_CODE (SET_DEST (set)) == REG
7032 && SET_DEST (set) == dest_reg
7033 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7034 add_val, mult_val, ext_val, 0,
7035 &benefit, VOIDmode)
7036 /* Giv created by equivalent expression. */
7037 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7038 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7039 add_val, mult_val, ext_val, 0,
7040 &benefit, VOIDmode)))
7041 && src_reg == v->src_reg)
7043 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7044 benefit += libcall_benefit (p);
7046 count--;
7047 v->mult_val = *mult_val;
7048 v->add_val = *add_val;
7049 v->benefit += benefit;
7051 else if (code != NOTE)
7053 /* Allow insns that set something other than this giv to a
7054 constant. Such insns are needed on machines which cannot
7055 include long constants and should not disqualify a giv. */
7056 if (code == INSN
7057 && (set = single_set (p))
7058 && SET_DEST (set) != dest_reg
7059 && CONSTANT_P (SET_SRC (set)))
7060 continue;
7062 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7063 return 0;
7067 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7068 *last_consec_insn = p;
7069 return v->benefit;
7072 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7073 represented by G1. If no such expression can be found, or it is clear that
7074 it cannot possibly be a valid address, 0 is returned.
7076 To perform the computation, we note that
7077 G1 = x * v + a and
7078 G2 = y * v + b
7079 where `v' is the biv.
7081 So G2 = (y/x) * G1 + (b - a*y/x).
7083 Note that MULT = y/x.
7085 Update: A and B are now allowed to be additive expressions such that
7086 B contains all variables in A. That is, computing B-A will not require
7087 subtracting variables. */
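/* Worked example (illustrative numbers only): with G1 = 4*v + 8 and
   G2 = 8*v + 20, MULT = y/x = 2 and

     G2 = 2 * G1 + (20 - 8*2) = 2 * G1 + 4,

   which is the form built below from G1's register.  */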
7089 static rtx
7090 express_from_1 (rtx a, rtx b, rtx mult)
7092 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7094 if (mult == const0_rtx)
7095 return b;
7097 /* If MULT is not 1, we cannot handle A with non-constants, since we
7098 would then be required to subtract multiples of the registers in A.
7099 This is theoretically possible, and may even apply to some Fortran
7100 constructs, but it is a lot of work and we do not attempt it here. */
7102 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7103 return NULL_RTX;
7105 /* In general these structures are sorted top to bottom (down the PLUS
7106 chain), but not left to right across the PLUS. If B is a higher
7107 order giv than A, we can strip one level and recurse. If A is higher
7108 order, we'll eventually bail out, but won't know that until the end.
7109 If they are the same, we'll strip one level around this loop. */
7111 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7113 rtx ra, rb, oa, ob, tmp;
7115 ra = XEXP (a, 0), oa = XEXP (a, 1);
7116 if (GET_CODE (ra) == PLUS)
7117 tmp = ra, ra = oa, oa = tmp;
7119 rb = XEXP (b, 0), ob = XEXP (b, 1);
7120 if (GET_CODE (rb) == PLUS)
7121 tmp = rb, rb = ob, ob = tmp;
7123 if (rtx_equal_p (ra, rb))
7124 /* We matched: remove one reg completely. */
7125 a = oa, b = ob;
7126 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7127 /* An alternate match. */
7128 a = oa, b = rb;
7129 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7130 /* An alternate match. */
7131 a = ra, b = ob;
7132 else
7134 /* Indicates an extra register in B. Strip one level from B and
7135 recurse, hoping B was the higher order expression. */
7136 ob = express_from_1 (a, ob, mult);
7137 if (ob == NULL_RTX)
7138 return NULL_RTX;
7139 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7143 /* Here we are at the last level of A, go through the cases hoping to
7144 get rid of everything but a constant. */
7146 if (GET_CODE (a) == PLUS)
7148 rtx ra, oa;
7150 ra = XEXP (a, 0), oa = XEXP (a, 1);
7151 if (rtx_equal_p (oa, b))
7152 oa = ra;
7153 else if (!rtx_equal_p (ra, b))
7154 return NULL_RTX;
7156 if (GET_CODE (oa) != CONST_INT)
7157 return NULL_RTX;
7159 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7161 else if (GET_CODE (a) == CONST_INT)
7163 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7165 else if (CONSTANT_P (a))
7167 enum machine_mode mode_a = GET_MODE (a);
7168 enum machine_mode mode_b = GET_MODE (b);
7169 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7170 return simplify_gen_binary (MINUS, mode, b, a);
7172 else if (GET_CODE (b) == PLUS)
7174 if (rtx_equal_p (a, XEXP (b, 0)))
7175 return XEXP (b, 1);
7176 else if (rtx_equal_p (a, XEXP (b, 1)))
7177 return XEXP (b, 0);
7178 else
7179 return NULL_RTX;
7181 else if (rtx_equal_p (a, b))
7182 return const0_rtx;
7184 return NULL_RTX;
7188 express_from (struct induction *g1, struct induction *g2)
7190 rtx mult, add;
7192 /* The value that G1 will be multiplied by must be a constant integer. Also,
7193 the only chance we have of getting a valid address is if y/x (see above
7194 for notation) is also an integer. */
7195 if (GET_CODE (g1->mult_val) == CONST_INT
7196 && GET_CODE (g2->mult_val) == CONST_INT)
7198 if (g1->mult_val == const0_rtx
7199 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7200 return NULL_RTX;
7201 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7203 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7204 mult = const1_rtx;
7205 else
7207 /* ??? Find out if the one is a multiple of the other? */
7208 return NULL_RTX;
7211 add = express_from_1 (g1->add_val, g2->add_val, mult);
7212 if (add == NULL_RTX)
7214 /* Failed. If we've got a multiplication factor between G1 and G2,
7215 scale G1's addend and try again. */
7216 if (INTVAL (mult) > 1)
7218 rtx g1_add_val = g1->add_val;
7219 if (GET_CODE (g1_add_val) == MULT
7220 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7222 HOST_WIDE_INT m;
7223 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7224 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7225 XEXP (g1_add_val, 0), GEN_INT (m));
7227 else
7229 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7230 mult);
7233 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7236 if (add == NULL_RTX)
7237 return NULL_RTX;
7239 /* Form simplified final result. */
7240 if (mult == const0_rtx)
7241 return add;
7242 else if (mult == const1_rtx)
7243 mult = g1->dest_reg;
7244 else
7245 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7247 if (add == const0_rtx)
7248 return mult;
7249 else
7251 if (GET_CODE (add) == PLUS
7252 && CONSTANT_P (XEXP (add, 1)))
7254 rtx tem = XEXP (add, 1);
7255 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7256 add = tem;
7259 return gen_rtx_PLUS (g2->mode, mult, add);
7263 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7264 represented by G1. This indicates that G2 should be combined with G1 and
7265 that G2 can use (either directly or via an address expression) a register
7266 used to represent G1. */
7268 static rtx
7269 combine_givs_p (struct induction *g1, struct induction *g2)
7271 rtx comb, ret;
7273 /* With the introduction of ext dependent givs, we must be careful about modes.
7274 G2 must not use a wider mode than G1. */
7275 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7276 return NULL_RTX;
7278 ret = comb = express_from (g1, g2);
7279 if (comb == NULL_RTX)
7280 return NULL_RTX;
7281 if (g1->mode != g2->mode)
7282 ret = gen_lowpart (g2->mode, comb);
7284 /* If these givs are identical, they can be combined. We use the results
7285 of express_from because the addends are not in a canonical form, so
7286 rtx_equal_p is a weaker test. */
7287 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7288 combination to be the other way round. */
7289 if (comb == g1->dest_reg
7290 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7292 return ret;
7295 /* If G2 can be expressed as a function of G1 and that function is valid
7296 as an address and no more expensive than using a register for G2,
7297 the expression of G2 in terms of G1 can be used. */
7298 if (ret != NULL_RTX
7299 && g2->giv_type == DEST_ADDR
7300 && memory_address_p (GET_MODE (g2->mem), ret))
7301 return ret;
7303 return NULL_RTX;
7306 /* Check each extension dependent giv in this class to see if its
7307 root biv is safe from wrapping in the interior mode, which would
7308 make the giv illegal. */
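/* Numeric illustration (hypothetical): a QImode biv with initial value 0,
   increment +1 and a known trip count of 200 ends at 200.  Zero extension
   is safe, since 200 <= GET_MODE_MASK (QImode) == 255, but sign extension
   is not, since 200 exceeds the signed maximum 127; givs depending on a
   SIGN_EXTEND of such a biv are invalidated below.  */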
7310 static void
7311 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7313 struct loop_info *loop_info = LOOP_INFO (loop);
7314 int ze_ok = 0, se_ok = 0, info_ok = 0;
7315 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7316 HOST_WIDE_INT start_val;
7317 unsigned HOST_WIDE_INT u_end_val = 0;
7318 unsigned HOST_WIDE_INT u_start_val = 0;
7319 rtx incr = pc_rtx;
7320 struct induction *v;
7322 /* Make sure the iteration data is available. We must have
7323 constants in order to be certain of no overflow. */
7324 if (loop_info->n_iterations > 0
7325 && bl->initial_value
7326 && GET_CODE (bl->initial_value) == CONST_INT
7327 && (incr = biv_total_increment (bl))
7328 && GET_CODE (incr) == CONST_INT
7329 /* Make sure the host can represent the arithmetic. */
7330 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7332 unsigned HOST_WIDE_INT abs_incr, total_incr;
7333 HOST_WIDE_INT s_end_val;
7334 int neg_incr;
7336 info_ok = 1;
7337 start_val = INTVAL (bl->initial_value);
7338 u_start_val = start_val;
7340 neg_incr = 0, abs_incr = INTVAL (incr);
7341 if (INTVAL (incr) < 0)
7342 neg_incr = 1, abs_incr = -abs_incr;
7343 total_incr = abs_incr * loop_info->n_iterations;
7345 /* Check for host arithmetic overflow. */
7346 if (total_incr / loop_info->n_iterations == abs_incr)
7348 unsigned HOST_WIDE_INT u_max;
7349 HOST_WIDE_INT s_max;
7351 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7352 s_end_val = u_end_val;
7353 u_max = GET_MODE_MASK (biv_mode);
7354 s_max = u_max >> 1;
7356 /* Check zero extension of biv ok. */
7357 if (start_val >= 0
7358 /* Check for host arithmetic overflow. */
7359 && (neg_incr
7360 ? u_end_val < u_start_val
7361 : u_end_val > u_start_val)
7362 /* Check for target arithmetic overflow. */
7363 && (neg_incr
7364 ? 1 /* taken care of with host overflow */
7365 : u_end_val <= u_max))
7367 ze_ok = 1;
7370 /* Check sign extension of biv ok. */
7371 /* ??? While it is true that overflow with signed and pointer
7372 arithmetic is undefined, I fear too many programmers don't
7373 keep this fact in mind -- myself included on occasion.
7374 So we leave the signed overflow optimizations alone. */
7375 if (start_val >= -s_max - 1
7376 /* Check for host arithmetic overflow. */
7377 && (neg_incr
7378 ? s_end_val < start_val
7379 : s_end_val > start_val)
7380 /* Check for target arithmetic overflow. */
7381 && (neg_incr
7382 ? s_end_val >= -s_max - 1
7383 : s_end_val <= s_max))
7385 se_ok = 1;
7390 /* If we know the BIV is compared at run-time against an
7391 invariant value, and the increment is +/- 1, we may also
7392 be able to prove that the BIV cannot overflow. */
7393 else if (bl->biv->src_reg == loop_info->iteration_var
7394 && loop_info->comparison_value
7395 && loop_invariant_p (loop, loop_info->comparison_value)
7396 && (incr = biv_total_increment (bl))
7397 && GET_CODE (incr) == CONST_INT)
7399 /* If the increment is +1, and the exit test is a <,
7400 the BIV cannot overflow. (For <=, we have the
7401 problematic case that the comparison value might
7402 be the maximum value of the range.) */
7403 if (INTVAL (incr) == 1)
7405 if (loop_info->comparison_code == LT)
7406 se_ok = ze_ok = 1;
7407 else if (loop_info->comparison_code == LTU)
7408 ze_ok = 1;
7411 /* Likewise for increment -1 and exit test >. */
7412 if (INTVAL (incr) == -1)
7414 if (loop_info->comparison_code == GT)
7415 se_ok = ze_ok = 1;
7416 else if (loop_info->comparison_code == GTU)
7417 ze_ok = 1;
7421 /* Invalidate givs that fail the tests. */
7422 for (v = bl->giv; v; v = v->next_iv)
7423 if (v->ext_dependent)
7425 enum rtx_code code = GET_CODE (v->ext_dependent);
7426 int ok = 0;
7428 switch (code)
7430 case SIGN_EXTEND:
7431 ok = se_ok;
7432 break;
7433 case ZERO_EXTEND:
7434 ok = ze_ok;
7435 break;
7437 case TRUNCATE:
7438 /* We don't know whether this value is being used as either
7439 signed or unsigned, so to safely truncate we must satisfy
7440 both. The initial check here verifies the BIV itself;
7441 once that is successful we may check its range wrt the
7442 derived GIV. This works only if we were able to determine
7443 constant start and end values above. */
7444 if (se_ok && ze_ok && info_ok)
7446 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7447 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7449 /* We know from the above that both endpoints are nonnegative,
7450 and that there is no wrapping. Verify that both endpoints
7451 are within the (signed) range of the outer mode. */
7452 if (u_start_val <= max && u_end_val <= max)
7453 ok = 1;
7455 break;
7457 default:
7458 abort ();
7461 if (ok)
7463 if (loop_dump_stream)
7465 fprintf (loop_dump_stream,
7466 "Verified ext dependent giv at %d of reg %d\n",
7467 INSN_UID (v->insn), bl->regno);
7470 else
7472 if (loop_dump_stream)
7474 const char *why;
7476 if (info_ok)
7477 why = "biv iteration values overflowed";
7478 else
7480 if (incr == pc_rtx)
7481 incr = biv_total_increment (bl);
7482 if (incr == const1_rtx)
7483 why = "biv iteration info incomplete; incr by 1";
7484 else
7485 why = "biv iteration info incomplete";
7488 fprintf (loop_dump_stream,
7489 "Failed ext dependent giv at %d, %s\n",
7490 INSN_UID (v->insn), why);
7492 v->ignore = 1;
7493 bl->all_reduced = 0;
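/* Worked example of the wrap checks above (illustrative values only).
   Suppose the biv is in QImode (u_max = 255, s_max = 127), starts at 120,
   is incremented by 2 and the loop runs 10 times: the final value 140 fits
   the unsigned range, so ze_ok is set, but exceeds s_max, so se_ok is not.
   ZERO_EXTEND dependent givs therefore survive, while SIGN_EXTEND ones
   (and TRUNCATE ones, which need both) are marked with v->ignore by the
   loop above.  */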
7498 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7500 static rtx
7501 extend_value_for_giv (struct induction *v, rtx value)
7503 rtx ext_dep = v->ext_dependent;
7505 if (! ext_dep)
7506 return value;
7508 /* Recall that check_ext_dependent_givs verified that the known bounds
7509 of a biv did not overflow or wrap with respect to the extension for
7510 the giv. Therefore, constants need no additional adjustment. */
7511 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7512 return value;
7514 /* Otherwise, we must adjust the value to compensate for the
7515 differing modes of the biv and the giv. */
7516 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
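/* For example (illustrative): if V->ext_dependent is a SIGN_EXTEND to SImode
   of a HImode biv, a VOIDmode constant such as (const_int 5) is returned
   unchanged, while a HImode register value R is rewritten as
   (sign_extend:SI R), so that the initializer matches the mode in which the
   giv is actually used.  */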
7519 struct combine_givs_stats
7521 int giv_number;
7522 int total_benefit;
7525 static int
7526 cmp_combine_givs_stats (const void *xp, const void *yp)
7528 const struct combine_givs_stats * const x =
7529 (const struct combine_givs_stats *) xp;
7530 const struct combine_givs_stats * const y =
7531 (const struct combine_givs_stats *) yp;
7532 int d;
7533 d = y->total_benefit - x->total_benefit;
7534 /* Stabilize the sort. */
7535 if (!d)
7536 d = x->giv_number - y->giv_number;
7537 return d;
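/* Example of the resulting order (illustrative): entries with total_benefit
   12, 7 and 12 for giv numbers 3, 2 and 1 sort as {1, 12}, {3, 12}, {2, 7}:
   highest benefit first, ties broken in favor of the lower giv number.  */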
7540 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7541 any other. If so, point SAME to the giv it is combined with, and set NEW_REG to
7542 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7543 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7545 static void
7546 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7548 /* Additional benefit to add for being combined multiple times. */
7549 const int extra_benefit = 3;
7551 struct induction *g1, *g2, **giv_array;
7552 int i, j, k, giv_count;
7553 struct combine_givs_stats *stats;
7554 rtx *can_combine;
7556 /* Count givs, because bl->giv_count is incorrect here. */
7557 giv_count = 0;
7558 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7559 if (!g1->ignore)
7560 giv_count++;
7562 giv_array = alloca (giv_count * sizeof (struct induction *));
7563 i = 0;
7564 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7565 if (!g1->ignore)
7566 giv_array[i++] = g1;
7568 stats = xcalloc (giv_count, sizeof (*stats));
7569 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
7571 for (i = 0; i < giv_count; i++)
7573 int this_benefit;
7574 rtx single_use;
7576 g1 = giv_array[i];
7577 stats[i].giv_number = i;
7579 /* If a DEST_REG GIV is used only once, do not allow it to combine
7580 with anything, for in doing so we will gain nothing that cannot
7581 be had by simply letting the GIV with which we would have combined
7582 be reduced on its own. The loss shows up in particular with
7583 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7584 be seen elsewhere as well. */
7585 if (g1->giv_type == DEST_REG
7586 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7587 && single_use != const0_rtx)
7588 continue;
7590 this_benefit = g1->benefit;
7591 /* Add an additional weight for zero addends. */
7592 if (g1->no_const_addval)
7593 this_benefit += 1;
7595 for (j = 0; j < giv_count; j++)
7597 rtx this_combine;
7599 g2 = giv_array[j];
7600 if (g1 != g2
7601 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7603 can_combine[i * giv_count + j] = this_combine;
7604 this_benefit += g2->benefit + extra_benefit;
7607 stats[i].total_benefit = this_benefit;
7610 /* Iterate, combining until we can't. */
7611 restart:
7612 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7614 if (loop_dump_stream)
7616 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7617 for (k = 0; k < giv_count; k++)
7619 g1 = giv_array[stats[k].giv_number];
7620 if (!g1->combined_with && !g1->same)
7621 fprintf (loop_dump_stream, " {%d, %d}",
7622 INSN_UID (giv_array[stats[k].giv_number]->insn),
7623 stats[k].total_benefit);
7625 putc ('\n', loop_dump_stream);
7628 for (k = 0; k < giv_count; k++)
7630 int g1_add_benefit = 0;
7632 i = stats[k].giv_number;
7633 g1 = giv_array[i];
7635 /* If it has already been combined, skip. */
7636 if (g1->combined_with || g1->same)
7637 continue;
7639 for (j = 0; j < giv_count; j++)
7641 g2 = giv_array[j];
7642 if (g1 != g2 && can_combine[i * giv_count + j]
7643 /* If it has already been combined, skip. */
7644 && ! g2->same && ! g2->combined_with)
7646 int l;
7648 g2->new_reg = can_combine[i * giv_count + j];
7649 g2->same = g1;
7650 /* For a DEST_ADDR giv, the destination may now be replaced by a mem
7651 expression instead of a register. This changes the costs considerably,
7652 so add the compensation. */
7653 if (g2->giv_type == DEST_ADDR)
7654 g2->benefit = (g2->benefit + reg_address_cost
7655 - address_cost (g2->new_reg,
7656 GET_MODE (g2->mem)));
7657 g1->combined_with++;
7658 g1->lifetime += g2->lifetime;
7660 g1_add_benefit += g2->benefit;
7662 /* ??? The new final_[bg]iv_value code does a much better job
7663 of finding replaceable giv's, and hence this code may no
7664 longer be necessary. */
7665 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7666 g1_add_benefit -= copy_cost;
7668 /* To help optimize the next set of combinations, remove
7669 this giv from the benefits of other potential mates. */
7670 for (l = 0; l < giv_count; ++l)
7672 int m = stats[l].giv_number;
7673 if (can_combine[m * giv_count + j])
7674 stats[l].total_benefit -= g2->benefit + extra_benefit;
7677 if (loop_dump_stream)
7678 fprintf (loop_dump_stream,
7679 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7680 INSN_UID (g2->insn), INSN_UID (g1->insn),
7681 g1->benefit, g1_add_benefit, g1->lifetime);
7685 /* To help optimize the next set of combinations, remove
7686 this giv from the benefits of other potential mates. */
7687 if (g1->combined_with)
7689 for (j = 0; j < giv_count; ++j)
7691 int m = stats[j].giv_number;
7692 if (can_combine[m * giv_count + i])
7693 stats[j].total_benefit -= g1->benefit + extra_benefit;
7696 g1->benefit += g1_add_benefit;
7698 /* We've finished with this giv, and everything it touched.
7699 Restart the combination so that proper weights for the
7700 rest of the givs are properly taken into account. */
7701 /* ??? Ideally we would compact the arrays at this point, so
7702 as to not cover old ground. But sanely compacting
7703 can_combine is tricky. */
7704 goto restart;
7708 /* Clean up. */
7709 free (stats);
7710 free (can_combine);
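/* A small example of one pass through the loop above (made-up numbers): with
   three DEST_ADDR givs A, B and C that can all be expressed in terms of A,
   A's total_benefit also includes B's and C's benefits plus extra_benefit
   for each, so A sorts first; B and C then record A in their ->same field
   and get new_reg set to an expression in A's dest_reg, the scores of the
   remaining candidates are reduced accordingly, and the scan restarts.  */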
7713 /* Generate sequence for REG = B * M + A. B is the initial value of
7714 the basic induction variable, M a multiplicative constant, A an
7715 additive constant and REG the destination register. */
7717 static rtx
7718 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7720 rtx seq;
7721 rtx result;
7723 start_sequence ();
7724 /* Use unsigned arithmetic. */
7725 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7726 if (reg != result)
7727 emit_move_insn (reg, result);
7728 seq = get_insns ();
7729 end_sequence ();
7731 return seq;
7735 /* Update registers created in insn sequence SEQ. */
7737 static void
7738 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7740 rtx insn;
7742 /* Update register info for alias analysis. */
7744 insn = seq;
7745 while (insn != NULL_RTX)
7747 rtx set = single_set (insn);
7749 if (set && GET_CODE (SET_DEST (set)) == REG)
7750 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7752 insn = NEXT_INSN (insn);
7757 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7758 is the initial value of the basic induction variable, M a
7759 multiplicative constant, A an additive constant and REG the
7760 destination register. */
7762 void
7763 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7764 rtx reg, basic_block before_bb, rtx before_insn)
7766 rtx seq;
7768 if (! before_insn)
7770 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7771 return;
7774 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7775 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7777 /* Increase the lifetime of any invariants moved further in code. */
7778 update_reg_last_use (a, before_insn);
7779 update_reg_last_use (b, before_insn);
7780 update_reg_last_use (m, before_insn);
7782 /* It is possible that the expansion created lots of new registers.
7783 Iterate over the sequence we just created and record them all. We
7784 must do this before inserting the sequence. */
7785 loop_regs_update (loop, seq);
7787 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7791 /* Emit insns after the loop, at its sink, to set REG = B * M + A. B
7792 is the initial value of the basic induction variable, M a
7793 multiplicative constant, A an additive constant and REG the
7794 destination register. */
7796 void
7797 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7799 rtx seq;
7801 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7802 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7804 /* Increase the lifetime of any invariants moved further in code.
7805 ???? Is this really necessary? */
7806 update_reg_last_use (a, loop->sink);
7807 update_reg_last_use (b, loop->sink);
7808 update_reg_last_use (m, loop->sink);
7810 /* It is possible that the expansion created lots of new registers.
7811 Iterate over the sequence we just created and record them all. We
7812 must do this before inserting the sequence. */
7813 loop_regs_update (loop, seq);
7815 loop_insn_sink (loop, seq);
7819 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the
7820 initial value of the basic induction variable, M a multiplicative
7821 constant, A an additive constant and REG the destination register. */
7823 void
7824 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7826 rtx seq;
7828 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7829 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7831 /* It is possible that the expansion created lots of new registers.
7832 Iterate over the sequence we just created and record them all. We
7833 must do this before inserting the sequence. */
7834 loop_regs_update (loop, seq);
7836 loop_insn_hoist (loop, seq);
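/* The three loop_iv_add_mult_* helpers above differ only in where the
   generated sequence is placed: immediately before a given insn, after the
   loop at its sink, or in the loop pre-header.  A hypothetical caller that
   initializes a reduced giv's register in the pre-header might therefore
   write (sketch only; the argument choice depends on the caller's context):

     loop_iv_add_mult_hoist (loop, bl->initial_value, v->mult_val,
                             v->add_val, v->new_reg);  */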
7841 /* Similar to gen_add_mult, but compute cost rather than generating
7842 sequence. */
7844 static int
7845 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7847 int cost = 0;
7848 rtx last, result;
7850 start_sequence ();
7851 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7852 if (reg != result)
7853 emit_move_insn (reg, result);
7854 last = get_last_insn ();
7855 while (last)
7857 rtx t = single_set (last);
7858 if (t)
7859 cost += rtx_cost (SET_SRC (t), SET);
7860 last = PREV_INSN (last);
7862 end_sequence ();
7863 return cost;
7866 /* Test whether A * B can be computed without
7867 an actual multiply insn. Value is 1 if so.
7869 ??? This function stinks because it generates a ton of wasted RTL
7870 ??? and as a result fragments GC memory to no end. There are other
7871 ??? places in the compiler which are invoked a lot and do the same
7872 ??? thing, generate wasted RTL just to see if something is possible. */
7874 static int
7875 product_cheap_p (rtx a, rtx b)
7877 rtx tmp;
7878 int win, n_insns;
7880 /* If only one is constant, make it B. */
7881 if (GET_CODE (a) == CONST_INT)
7882 tmp = a, a = b, b = tmp;
7884 /* If first constant, both constant, so don't need multiply. */
7885 if (GET_CODE (a) == CONST_INT)
7886 return 1;
7888 /* If second not constant, neither is constant, so would need multiply. */
7889 if (GET_CODE (b) != CONST_INT)
7890 return 0;
7892 /* One operand is constant, so might not need multiply insn. Generate the
7893 code for the multiply and see if a call or multiply, or long sequence
7894 of insns is generated. */
7896 start_sequence ();
7897 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7898 tmp = get_insns ();
7899 end_sequence ();
7901 win = 1;
7902 if (INSN_P (tmp))
7904 n_insns = 0;
7905 while (tmp != NULL_RTX)
7907 rtx next = NEXT_INSN (tmp);
7909 if (++n_insns > 3
7910 || GET_CODE (tmp) != INSN
7911 || (GET_CODE (PATTERN (tmp)) == SET
7912 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7913 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7914 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7915 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7917 win = 0;
7918 break;
7921 tmp = next;
7924 else if (GET_CODE (tmp) == SET
7925 && GET_CODE (SET_SRC (tmp)) == MULT)
7926 win = 0;
7927 else if (GET_CODE (tmp) == PARALLEL
7928 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7929 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7930 win = 0;
7932 return win;
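/* For instance (illustrative and target dependent): multiplying by 5 usually
   expands to a shift and an add, so product_cheap_p returns 1, whereas
   multiplying by a large "random" constant such as 123457 typically needs a
   real mult insn or more than three insns, and the function returns 0.  */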
7935 /* Check to see if loop can be terminated by a "decrement and branch until
7936 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7937 Also try reversing an increment loop to a decrement loop
7938 to see if the optimization can be performed.
7939 Value is nonzero if optimization was performed. */
7941 /* This is useful even if the architecture doesn't have such an insn,
7942 because it might change a loop which increments from 0 to n to a loop
7943 which decrements from n to 0. A loop that decrements to zero is usually
7944 faster than one that increments from zero. */
7946 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7947 such as approx_final_value, biv_total_increment, loop_iterations, and
7948 final_[bg]iv_value. */
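/* Source-level sketch of the transformation attempted below (illustrative
   only; the pass works on RTL, not on C):

     before:  for (i = 0; i < 100; i++)      i used only for counting
                body ();

     after:   for (i = 100; i != 0; i--)     or a >= 0 test with a
                body ();                      REG_NONNEG note on the branch

   which lets targets with a decrement-and-branch-until-zero instruction
   (such as the m68k DBRA) close the loop in a single insn.  */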
7950 static int
7951 check_dbra_loop (struct loop *loop, int insn_count)
7953 struct loop_info *loop_info = LOOP_INFO (loop);
7954 struct loop_regs *regs = LOOP_REGS (loop);
7955 struct loop_ivs *ivs = LOOP_IVS (loop);
7956 struct iv_class *bl;
7957 rtx reg;
7958 rtx jump_label;
7959 rtx final_value;
7960 rtx start_value;
7961 rtx new_add_val;
7962 rtx comparison;
7963 rtx before_comparison;
7964 rtx p;
7965 rtx jump;
7966 rtx first_compare;
7967 int compare_and_branch;
7968 rtx loop_start = loop->start;
7969 rtx loop_end = loop->end;
7971 /* If last insn is a conditional branch, and the insn before tests a
7972 register value, try to optimize it. Otherwise, we can't do anything. */
7974 jump = PREV_INSN (loop_end);
7975 comparison = get_condition_for_loop (loop, jump);
7976 if (comparison == 0)
7977 return 0;
7978 if (!onlyjump_p (jump))
7979 return 0;
7981 /* Try to compute whether the compare/branch at the loop end is one or
7982 two instructions. */
7983 get_condition (jump, &first_compare, false);
7984 if (first_compare == jump)
7985 compare_and_branch = 1;
7986 else if (first_compare == prev_nonnote_insn (jump))
7987 compare_and_branch = 2;
7988 else
7989 return 0;
7992 /* If more than one condition is present to control the loop, then
7993 do not proceed, as this function does not know how to rewrite
7994 loop tests with more than one condition.
7996 Look backwards from the first insn in the last comparison
7997 sequence and see if we've got another comparison sequence. */
7999 rtx jump1;
8000 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8001 if (GET_CODE (jump1) == JUMP_INSN)
8002 return 0;
8005 /* Check all of the bivs to see if the compare uses one of them.
8006 Skip biv's set more than once because we can't guarantee that
8007 it will be zero on the last iteration. Also skip if the biv is
8008 used between its update and the test insn. */
8010 for (bl = ivs->list; bl; bl = bl->next)
8012 if (bl->biv_count == 1
8013 && ! bl->biv->maybe_multiple
8014 && bl->biv->dest_reg == XEXP (comparison, 0)
8015 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8016 first_compare))
8017 break;
8020 if (! bl)
8021 return 0;
8023 /* Look for the case where the basic induction variable is always
8024 nonnegative, and equals zero on the last iteration.
8025 In this case, add a reg_note REG_NONNEG, which allows the
8026 m68k DBRA instruction to be used. */
8028 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
8029 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8030 && GET_CODE (bl->biv->add_val) == CONST_INT
8031 && INTVAL (bl->biv->add_val) < 0)
8033 /* Initial value must be greater than 0,
8034 init_val % -dec_value == 0 to ensure that it equals zero on
8035 the last iteration */
8037 if (GET_CODE (bl->initial_value) == CONST_INT
8038 && INTVAL (bl->initial_value) > 0
8039 && (INTVAL (bl->initial_value)
8040 % (-INTVAL (bl->biv->add_val))) == 0)
8042 /* register always nonnegative, add REG_NOTE to branch */
8043 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8044 REG_NOTES (jump)
8045 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8046 REG_NOTES (jump));
8047 bl->nonneg = 1;
8049 return 1;
8052 /* If the decrement is 1 and the value was tested as >= 0 before
8053 the loop, then we can safely optimize. */
8054 for (p = loop_start; p; p = PREV_INSN (p))
8056 if (GET_CODE (p) == CODE_LABEL)
8057 break;
8058 if (GET_CODE (p) != JUMP_INSN)
8059 continue;
8061 before_comparison = get_condition_for_loop (loop, p);
8062 if (before_comparison
8063 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8064 && (GET_CODE (before_comparison) == LT
8065 || GET_CODE (before_comparison) == LTU)
8066 && XEXP (before_comparison, 1) == const0_rtx
8067 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8068 && INTVAL (bl->biv->add_val) == -1)
8070 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8071 REG_NOTES (jump)
8072 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8073 REG_NOTES (jump));
8074 bl->nonneg = 1;
8076 return 1;
8080 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8081 && INTVAL (bl->biv->add_val) > 0)
8083 /* Try to change inc to dec, so can apply above optimization. */
8084 /* Can do this if:
8085 all registers modified are induction variables or invariant,
8086 all memory references have non-overlapping addresses
8087 (obviously true if only one write)
8088 allow 2 insns for the compare/jump at the end of the loop. */
8089 /* Also, we must avoid any instructions which use both the reversed
8090 biv and another biv. Such instructions will fail if the loop is
8091 reversed. We meet this condition by requiring that either
8092 no_use_except_counting is true, or else that there is only
8093 one biv. */
8094 int num_nonfixed_reads = 0;
8095 /* 1 if the iteration var is used only to count iterations. */
8096 int no_use_except_counting = 0;
8097 /* 1 if the loop has no memory store, or it has a single memory store
8098 which is reversible. */
8099 int reversible_mem_store = 1;
8101 if (bl->giv_count == 0
8102 && !loop->exit_count
8103 && !loop_info->has_multiple_exit_targets)
8105 rtx bivreg = regno_reg_rtx[bl->regno];
8106 struct iv_class *blt;
8108 /* If there are no givs for this biv, and the only exit is the
8109 fall through at the end of the loop, then
8110 see if perhaps there are no uses except to count. */
8111 no_use_except_counting = 1;
8112 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8113 if (INSN_P (p))
8115 rtx set = single_set (p);
8117 if (set && GET_CODE (SET_DEST (set)) == REG
8118 && REGNO (SET_DEST (set)) == bl->regno)
8119 /* An insn that sets the biv is okay. */
8121 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8122 /* An insn that doesn't mention the biv is okay. */
8124 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8125 || p == prev_nonnote_insn (loop_end))
8127 /* If either of these insns uses the biv and sets a pseudo
8128 that has more than one usage, then the biv has uses
8129 other than counting since it's used to derive a value
8130 that is used more than one time. */
8131 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8132 regs);
8133 if (regs->multiple_uses)
8135 no_use_except_counting = 0;
8136 break;
8139 else
8141 no_use_except_counting = 0;
8142 break;
8146 /* A biv has uses besides counting if it is used to set
8147 another biv. */
8148 for (blt = ivs->list; blt; blt = blt->next)
8149 if (blt->init_set
8150 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8152 no_use_except_counting = 0;
8153 break;
8157 if (no_use_except_counting)
8158 /* No need to worry about MEMs. */
8160 else if (loop_info->num_mem_sets <= 1)
8162 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8163 if (INSN_P (p))
8164 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8166 /* If the loop has a single store, and the destination address is
8167 invariant, then we can't reverse the loop, because this address
8168 might then have the wrong value at loop exit.
8169 This would work if the source was invariant also, however, in that
8170 case, the insn should have been moved out of the loop. */
8172 if (loop_info->num_mem_sets == 1)
8174 struct induction *v;
8176 /* If we could prove that each of the memory locations
8177 written to was different, then we could reverse the
8178 store -- but we don't presently have any way of
8179 knowing that. */
8180 reversible_mem_store = 0;
8182 /* If the store depends on a register that is set after the
8183 store, it depends on the initial value, and is thus not
8184 reversible. */
8185 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8187 if (v->giv_type == DEST_REG
8188 && reg_mentioned_p (v->dest_reg,
8189 PATTERN (loop_info->first_loop_store_insn))
8190 && loop_insn_first_p (loop_info->first_loop_store_insn,
8191 v->insn))
8192 reversible_mem_store = 0;
8196 else
8197 return 0;
8199 /* This code only acts for innermost loops. Also it simplifies
8200 the memory address check by only reversing loops with
8201 zero or one memory access.
8202 Two memory accesses could involve parts of the same array,
8203 and that can't be reversed.
8204 If the biv is used only for counting, then we don't need to worry
8205 about all these things. */
8207 if ((num_nonfixed_reads <= 1
8208 && ! loop_info->has_nonconst_call
8209 && ! loop_info->has_prefetch
8210 && ! loop_info->has_volatile
8211 && reversible_mem_store
8212 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8213 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8214 && (bl == ivs->list && bl->next == 0))
8215 || (no_use_except_counting && ! loop_info->has_prefetch))
8217 rtx tem;
8219 /* Loop can be reversed. */
8220 if (loop_dump_stream)
8221 fprintf (loop_dump_stream, "Can reverse loop\n");
8223 /* Now check other conditions:
8225 The increment must be a constant, as must the initial value,
8226 and the comparison code must be LT.
8228 This test can probably be improved since +/- 1 in the constant
8229 can be obtained by changing LT to LE and vice versa; this is
8230 confusing. */
8232 if (comparison
8233 /* for constants, LE gets turned into LT */
8234 && (GET_CODE (comparison) == LT
8235 || (GET_CODE (comparison) == LE
8236 && no_use_except_counting)
8237 || GET_CODE (comparison) == LTU))
8239 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8240 rtx initial_value, comparison_value;
8241 int nonneg = 0;
8242 enum rtx_code cmp_code;
8243 int comparison_const_width;
8244 unsigned HOST_WIDE_INT comparison_sign_mask;
8246 add_val = INTVAL (bl->biv->add_val);
8247 comparison_value = XEXP (comparison, 1);
8248 if (GET_MODE (comparison_value) == VOIDmode)
8249 comparison_const_width
8250 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8251 else
8252 comparison_const_width
8253 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8254 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8255 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8256 comparison_sign_mask
8257 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8259 /* If the comparison value is not a loop invariant, then we
8260 can not reverse this loop.
8262 ??? If the insns which initialize the comparison value as
8263 a whole compute an invariant result, then we could move
8264 them out of the loop and proceed with loop reversal. */
8265 if (! loop_invariant_p (loop, comparison_value))
8266 return 0;
8268 if (GET_CODE (comparison_value) == CONST_INT)
8269 comparison_val = INTVAL (comparison_value);
8270 initial_value = bl->initial_value;
8272 /* Normalize the initial value if it is an integer and
8273 has no other use except as a counter. This will allow
8274 a few more loops to be reversed. */
8275 if (no_use_except_counting
8276 && GET_CODE (comparison_value) == CONST_INT
8277 && GET_CODE (initial_value) == CONST_INT)
8279 comparison_val = comparison_val - INTVAL (bl->initial_value);
8280 /* The code below requires comparison_val to be a multiple
8281 of add_val in order to do the loop reversal, so
8282 round up comparison_val to a multiple of add_val.
8283 Since comparison_value is constant, we know that the
8284 current comparison code is LT. */
8285 comparison_val = comparison_val + add_val - 1;
8286 comparison_val
8287 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8288 /* We postpone overflow checks for COMPARISON_VAL here;
8289 even if there is an overflow, we might still be able to
8290 reverse the loop, if converting the loop exit test to
8291 NE is possible. */
8292 initial_value = const0_rtx;
8295 /* First check if we can do a vanilla loop reversal. */
8296 if (initial_value == const0_rtx
8297 /* If we have a decrement_and_branch_on_count,
8298 prefer the NE test, since this will allow that
8299 instruction to be generated. Note that we must
8300 use a vanilla loop reversal if the biv is used to
8301 calculate a giv or has a non-counting use. */
8302 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8303 && defined (HAVE_decrement_and_branch_on_count)
8304 && (! (add_val == 1 && loop->vtop
8305 && (bl->biv_count == 0
8306 || no_use_except_counting)))
8307 #endif
8308 && GET_CODE (comparison_value) == CONST_INT
8309 /* Now do postponed overflow checks on COMPARISON_VAL. */
8310 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8311 & comparison_sign_mask))
8313 /* Register will always be nonnegative, with value
8314 0 on last iteration */
8315 add_adjust = add_val;
8316 nonneg = 1;
8317 cmp_code = GE;
8319 else if (add_val == 1 && loop->vtop
8320 && (bl->biv_count == 0
8321 || no_use_except_counting))
8323 add_adjust = 0;
8324 cmp_code = NE;
8326 else
8327 return 0;
8329 if (GET_CODE (comparison) == LE)
8330 add_adjust -= add_val;
8332 /* If the initial value is not zero, or if the comparison
8333 value is not an exact multiple of the increment, then we
8334 can not reverse this loop. */
8335 if (initial_value == const0_rtx
8336 && GET_CODE (comparison_value) == CONST_INT)
8338 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8339 return 0;
8341 else
8343 if (! no_use_except_counting || add_val != 1)
8344 return 0;
8347 final_value = comparison_value;
8349 /* Reset these in case we normalized the initial value
8350 and comparison value above. */
8351 if (GET_CODE (comparison_value) == CONST_INT
8352 && GET_CODE (initial_value) == CONST_INT)
8354 comparison_value = GEN_INT (comparison_val);
8355 final_value
8356 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8358 bl->initial_value = initial_value;
8360 /* Save some info needed to produce the new insns. */
8361 reg = bl->biv->dest_reg;
8362 jump_label = condjump_label (PREV_INSN (loop_end));
8363 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8365 /* Set start_value; if this is not a CONST_INT, we need
8366 to generate a SUB.
8367 Initialize biv to start_value before loop start.
8368 The old initializing insn will be deleted as a
8369 dead store by flow.c. */
8370 if (initial_value == const0_rtx
8371 && GET_CODE (comparison_value) == CONST_INT)
8373 start_value = GEN_INT (comparison_val - add_adjust);
8374 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8376 else if (GET_CODE (initial_value) == CONST_INT)
8378 enum machine_mode mode = GET_MODE (reg);
8379 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8380 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8382 if (add_insn == 0)
8383 return 0;
8385 start_value
8386 = gen_rtx_PLUS (mode, comparison_value, offset);
8387 loop_insn_hoist (loop, add_insn);
8388 if (GET_CODE (comparison) == LE)
8389 final_value = gen_rtx_PLUS (mode, comparison_value,
8390 GEN_INT (add_val));
8392 else if (! add_adjust)
8394 enum machine_mode mode = GET_MODE (reg);
8395 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8396 initial_value);
8398 if (sub_insn == 0)
8399 return 0;
8400 start_value
8401 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8402 loop_insn_hoist (loop, sub_insn);
8404 else
8405 /* We could handle the other cases too, but it'll be
8406 better to have a testcase first. */
8407 return 0;
8409 /* We may not have a single insn which can increment a reg, so
8410 create a sequence to hold all the insns from expand_inc. */
8411 start_sequence ();
8412 expand_inc (reg, new_add_val);
8413 tem = get_insns ();
8414 end_sequence ();
8416 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8417 delete_insn (bl->biv->insn);
8419 /* Update biv info to reflect its new status. */
8420 bl->biv->insn = p;
8421 bl->initial_value = start_value;
8422 bl->biv->add_val = new_add_val;
8424 /* Update loop info. */
8425 loop_info->initial_value = reg;
8426 loop_info->initial_equiv_value = reg;
8427 loop_info->final_value = const0_rtx;
8428 loop_info->final_equiv_value = const0_rtx;
8429 loop_info->comparison_value = const0_rtx;
8430 loop_info->comparison_code = cmp_code;
8431 loop_info->increment = new_add_val;
8433 /* Inc LABEL_NUSES so that delete_insn will
8434 not delete the label. */
8435 LABEL_NUSES (XEXP (jump_label, 0))++;
8437 /* Emit an insn after the end of the loop to set the biv's
8438 proper exit value if it is used anywhere outside the loop. */
8439 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8440 || ! bl->init_insn
8441 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8442 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8444 /* Delete compare/branch at end of loop. */
8445 delete_related_insns (PREV_INSN (loop_end));
8446 if (compare_and_branch == 2)
8447 delete_related_insns (first_compare);
8449 /* Add new compare/branch insn at end of loop. */
8450 start_sequence ();
8451 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8452 GET_MODE (reg), 0,
8453 XEXP (jump_label, 0));
8454 tem = get_insns ();
8455 end_sequence ();
8456 emit_jump_insn_before (tem, loop_end);
8458 for (tem = PREV_INSN (loop_end);
8459 tem && GET_CODE (tem) != JUMP_INSN;
8460 tem = PREV_INSN (tem))
8463 if (tem)
8464 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8466 if (nonneg)
8468 if (tem)
8470 /* Increment of LABEL_NUSES done above. */
8471 /* Register is now always nonnegative,
8472 so add REG_NONNEG note to the branch. */
8473 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8474 REG_NOTES (tem));
8476 bl->nonneg = 1;
8479 /* No insn may reference both the reversed and another biv or it
8480 will fail (see comment near the top of the loop reversal
8481 code).
8482 Earlier on, we have verified that the biv has no use except
8483 counting, or it is the only biv in this function.
8484 However, the code that computes no_use_except_counting does
8485 not verify reg notes. It's possible to have an insn that
8486 references another biv, and has a REG_EQUAL note with an
8487 expression based on the reversed biv. To avoid this case,
8488 remove all REG_EQUAL notes based on the reversed biv
8489 here. */
8490 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8491 if (INSN_P (p))
8493 rtx *pnote;
8494 rtx set = single_set (p);
8495 /* If this is a set of a GIV based on the reversed biv, any
8496 REG_EQUAL notes should still be correct. */
8497 if (! set
8498 || GET_CODE (SET_DEST (set)) != REG
8499 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8500 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8501 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8502 for (pnote = &REG_NOTES (p); *pnote;)
8504 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8505 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8506 XEXP (*pnote, 0)))
8507 *pnote = XEXP (*pnote, 1);
8508 else
8509 pnote = &XEXP (*pnote, 1);
8513 /* Mark that this biv has been reversed. Each giv which depends
8514 on this biv, and which is also live past the end of the loop
8515 will have to be fixed up. */
8517 bl->reversed = 1;
8519 if (loop_dump_stream)
8521 fprintf (loop_dump_stream, "Reversed loop");
8522 if (bl->nonneg)
8523 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8524 else
8525 fprintf (loop_dump_stream, "\n");
8528 return 1;
8533 return 0;
8536 /* Verify whether the biv BL appears to be eliminable,
8537 based on the insns in the loop that refer to it.
8539 If ELIMINATE_P is nonzero, actually do the elimination.
8541 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8542 determine whether invariant insns should be placed inside or at the
8543 start of the loop. */
8545 static int
8546 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8547 int eliminate_p, int threshold, int insn_count)
8549 struct loop_ivs *ivs = LOOP_IVS (loop);
8550 rtx reg = bl->biv->dest_reg;
8551 rtx p;
8553 /* Scan all insns in the loop, stopping if we find one that uses the
8554 biv in a way that we cannot eliminate. */
8556 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8558 enum rtx_code code = GET_CODE (p);
8559 basic_block where_bb = 0;
8560 rtx where_insn = threshold >= insn_count ? 0 : p;
8561 rtx note;
8563 /* If this is a libcall that sets a giv, skip ahead to its end. */
8564 if (GET_RTX_CLASS (code) == 'i')
8566 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8568 if (note)
8570 rtx last = XEXP (note, 0);
8571 rtx set = single_set (last);
8573 if (set && GET_CODE (SET_DEST (set)) == REG)
8575 unsigned int regno = REGNO (SET_DEST (set));
8577 if (regno < ivs->n_regs
8578 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8579 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8580 p = last;
8585 /* Closely examine the insn if the biv is mentioned. */
8586 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8587 && reg_mentioned_p (reg, PATTERN (p))
8588 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8589 eliminate_p, where_bb, where_insn))
8591 if (loop_dump_stream)
8592 fprintf (loop_dump_stream,
8593 "Cannot eliminate biv %d: biv used in insn %d.\n",
8594 bl->regno, INSN_UID (p));
8595 break;
8598 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8599 if (eliminate_p
8600 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8601 && reg_mentioned_p (reg, XEXP (note, 0)))
8602 remove_note (p, note);
8605 if (p == loop->end)
8607 if (loop_dump_stream)
8608 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8609 bl->regno, eliminate_p ? "was" : "can be");
8610 return 1;
8613 return 0;
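/* Illustrative example of a successful elimination (made-up names): if the
   only remaining use of biv I is the exit test "I < 100" and a reduced giv
   P = 4*I + BASE exists, where BASE is a pointer, maybe_eliminate_biv_1 can
   rewrite the test to compare P against BASE + 400; after that I is no
   longer needed inside the loop and the biv can be eliminated.  */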
8616 /* INSN and REFERENCE are instructions in the same insn chain.
8617 Return nonzero if INSN is first. */
8619 int
8620 loop_insn_first_p (rtx insn, rtx reference)
8622 rtx p, q;
8624 for (p = insn, q = reference;;)
8626 /* Start with test for not first so that INSN == REFERENCE yields not
8627 first. */
8628 if (q == insn || ! p)
8629 return 0;
8630 if (p == reference || ! q)
8631 return 1;
8633 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8634 previous insn, hence the <= comparison below does not work if
8635 P is a note. */
8636 if (INSN_UID (p) < max_uid_for_loop
8637 && INSN_UID (q) < max_uid_for_loop
8638 && GET_CODE (p) != NOTE)
8639 return INSN_LUID (p) <= INSN_LUID (q);
8641 if (INSN_UID (p) >= max_uid_for_loop
8642 || GET_CODE (p) == NOTE)
8643 p = NEXT_INSN (p);
8644 if (INSN_UID (q) >= max_uid_for_loop)
8645 q = NEXT_INSN (q);
8649 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8650 the offset that we have to take into account due to auto-increment /
8651 giv derivation is zero. */
8652 static int
8653 biv_elimination_giv_has_0_offset (struct induction *biv,
8654 struct induction *giv, rtx insn)
8656 /* If the giv V had the auto-inc address optimization applied
8657 to it, and INSN occurs between the giv insn and the biv
8658 insn, then we'd have to adjust the value used here.
8659 This is rare, so we don't bother to make this possible. */
8660 if (giv->auto_inc_opt
8661 && ((loop_insn_first_p (giv->insn, insn)
8662 && loop_insn_first_p (insn, biv->insn))
8663 || (loop_insn_first_p (biv->insn, insn)
8664 && loop_insn_first_p (insn, giv->insn))))
8665 return 0;
8667 return 1;
8670 /* If BL appears in X (part of the pattern of INSN), see if we can
8671 eliminate its use. If so, return 1. If not, return 0.
8673 If BIV does not appear in X, return 1.
8675 If ELIMINATE_P is nonzero, actually do the elimination.
8676 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8677 Depending on how many items have been moved out of the loop, it
8678 will either be before INSN (when WHERE_INSN is nonzero) or at the
8679 start of the loop (when WHERE_INSN is zero). */
8681 static int
8682 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8683 struct iv_class *bl, int eliminate_p,
8684 basic_block where_bb, rtx where_insn)
8686 enum rtx_code code = GET_CODE (x);
8687 rtx reg = bl->biv->dest_reg;
8688 enum machine_mode mode = GET_MODE (reg);
8689 struct induction *v;
8690 rtx arg, tem;
8691 #ifdef HAVE_cc0
8692 rtx new;
8693 #endif
8694 int arg_operand;
8695 const char *fmt;
8696 int i, j;
8698 switch (code)
8700 case REG:
8701 /* If we haven't already been able to do something with this BIV,
8702 we can't eliminate it. */
8703 if (x == reg)
8704 return 0;
8705 return 1;
8707 case SET:
8708 /* If this sets the BIV, it is not a problem. */
8709 if (SET_DEST (x) == reg)
8710 return 1;
8712 /* If this is an insn that defines a giv, it is also ok because
8713 it will go away when the giv is reduced. */
8714 for (v = bl->giv; v; v = v->next_iv)
8715 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8716 return 1;
8718 #ifdef HAVE_cc0
8719 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8721 /* Can replace with any giv that was reduced and
8722 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8723 Require a constant for MULT_VAL, so we know it's nonzero.
8724 ??? We disable this optimization to avoid potential
8725 overflows. */
8727 for (v = bl->giv; v; v = v->next_iv)
8728 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8729 && v->add_val == const0_rtx
8730 && ! v->ignore && ! v->maybe_dead && v->always_computable
8731 && v->mode == mode
8732 && 0)
8734 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8735 continue;
8737 if (! eliminate_p)
8738 return 1;
8740 /* If the giv has the opposite direction of change,
8741 then reverse the comparison. */
8742 if (INTVAL (v->mult_val) < 0)
8743 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8744 const0_rtx, v->new_reg);
8745 else
8746 new = v->new_reg;
8748 /* We can probably test that giv's reduced reg. */
8749 if (validate_change (insn, &SET_SRC (x), new, 0))
8750 return 1;
8753 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8754 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8755 Require a constant for MULT_VAL, so we know it's nonzero.
8756 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8757 overflow problem. */
8759 for (v = bl->giv; v; v = v->next_iv)
8760 if (GET_CODE (v->mult_val) == CONST_INT
8761 && v->mult_val != const0_rtx
8762 && ! v->ignore && ! v->maybe_dead && v->always_computable
8763 && v->mode == mode
8764 && (GET_CODE (v->add_val) == SYMBOL_REF
8765 || GET_CODE (v->add_val) == LABEL_REF
8766 || GET_CODE (v->add_val) == CONST
8767 || (GET_CODE (v->add_val) == REG
8768 && REG_POINTER (v->add_val))))
8770 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8771 continue;
8773 if (! eliminate_p)
8774 return 1;
8776 /* If the giv has the opposite direction of change,
8777 then reverse the comparison. */
8778 if (INTVAL (v->mult_val) < 0)
8779 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8780 v->new_reg);
8781 else
8782 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8783 copy_rtx (v->add_val));
8785 /* Replace biv with the giv's reduced register. */
8786 update_reg_last_use (v->add_val, insn);
8787 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8788 return 1;
8790 /* Insn doesn't support that constant or invariant. Copy it
8791 into a register (it will be a loop invariant.) */
8792 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8794 loop_insn_emit_before (loop, 0, where_insn,
8795 gen_move_insn (tem,
8796 copy_rtx (v->add_val)));
8798 /* Substitute the new register for its invariant value in
8799 the compare expression. */
8800 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8801 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8802 return 1;
8805 #endif
8806 break;
8808 case COMPARE:
8809 case EQ: case NE:
8810 case GT: case GE: case GTU: case GEU:
8811 case LT: case LE: case LTU: case LEU:
8812 /* See if either argument is the biv. */
8813 if (XEXP (x, 0) == reg)
8814 arg = XEXP (x, 1), arg_operand = 1;
8815 else if (XEXP (x, 1) == reg)
8816 arg = XEXP (x, 0), arg_operand = 0;
8817 else
8818 break;
8820 if (CONSTANT_P (arg))
8822 /* First try to replace with any giv that has constant positive
8823 mult_val and constant add_val. We might be able to support
8824 negative mult_val, but it seems complex to do it in general. */
8826 for (v = bl->giv; v; v = v->next_iv)
8827 if (GET_CODE (v->mult_val) == CONST_INT
8828 && INTVAL (v->mult_val) > 0
8829 && (GET_CODE (v->add_val) == SYMBOL_REF
8830 || GET_CODE (v->add_val) == LABEL_REF
8831 || GET_CODE (v->add_val) == CONST
8832 || (GET_CODE (v->add_val) == REG
8833 && REG_POINTER (v->add_val)))
8834 && ! v->ignore && ! v->maybe_dead && v->always_computable
8835 && v->mode == mode)
8837 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8838 continue;
8840 /* Don't eliminate if the linear combination that makes up
8841 the giv overflows when it is applied to ARG. */
8842 if (GET_CODE (arg) == CONST_INT)
8844 rtx add_val;
8846 if (GET_CODE (v->add_val) == CONST_INT)
8847 add_val = v->add_val;
8848 else
8849 add_val = const0_rtx;
8851 if (const_mult_add_overflow_p (arg, v->mult_val,
8852 add_val, mode, 1))
8853 continue;
8856 if (! eliminate_p)
8857 return 1;
8859 /* Replace biv with the giv's reduced reg. */
8860 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8862 /* If all constants are actually constant integers and
8863 the derived constant can be directly placed in the COMPARE,
8864 do so. */
8865 if (GET_CODE (arg) == CONST_INT
8866 && GET_CODE (v->add_val) == CONST_INT)
8868 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8869 v->add_val, mode, 1);
8871 else
8873 /* Otherwise, load it into a register. */
8874 tem = gen_reg_rtx (mode);
8875 loop_iv_add_mult_emit_before (loop, arg,
8876 v->mult_val, v->add_val,
8877 tem, where_bb, where_insn);
8880 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8882 if (apply_change_group ())
8883 return 1;
8886 /* Look for giv with positive constant mult_val and nonconst add_val.
8887 Insert insns to calculate new compare value.
8888 ??? Turn this off due to possible overflow. */
8890 for (v = bl->giv; v; v = v->next_iv)
8891 if (GET_CODE (v->mult_val) == CONST_INT
8892 && INTVAL (v->mult_val) > 0
8893 && ! v->ignore && ! v->maybe_dead && v->always_computable
8894 && v->mode == mode
8895 && 0)
8897 rtx tem;
8899 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8900 continue;
8902 if (! eliminate_p)
8903 return 1;
8905 tem = gen_reg_rtx (mode);
8907 /* Replace biv with giv's reduced register. */
8908 validate_change (insn, &XEXP (x, 1 - arg_operand),
8909 v->new_reg, 1);
8911 /* Compute value to compare against. */
8912 loop_iv_add_mult_emit_before (loop, arg,
8913 v->mult_val, v->add_val,
8914 tem, where_bb, where_insn);
8915 /* Use it in this insn. */
8916 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8917 if (apply_change_group ())
8918 return 1;
8921 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8923 if (loop_invariant_p (loop, arg) == 1)
8925 /* Look for giv with constant positive mult_val and nonconst
8926 add_val. Insert insns to compute new compare value.
8927 ??? Turn this off due to possible overflow. */
8929 for (v = bl->giv; v; v = v->next_iv)
8930 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8931 && ! v->ignore && ! v->maybe_dead && v->always_computable
8932 && v->mode == mode
8933 && 0)
8935 rtx tem;
8937 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8938 continue;
8940 if (! eliminate_p)
8941 return 1;
8943 tem = gen_reg_rtx (mode);
8945 /* Replace biv with giv's reduced register. */
8946 validate_change (insn, &XEXP (x, 1 - arg_operand),
8947 v->new_reg, 1);
8949 /* Compute value to compare against. */
8950 loop_iv_add_mult_emit_before (loop, arg,
8951 v->mult_val, v->add_val,
8952 tem, where_bb, where_insn);
8953 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8954 if (apply_change_group ())
8955 return 1;
8959 /* This code has problems. Basically, when deciding whether we will
8960 eliminate BL, you can't know whether a particular giv
8961 of ARG will be reduced. If it isn't going to be reduced,
8962 we can't eliminate BL. We can try forcing it to be reduced,
8963 but that can generate poor code.
8965 The problem is that the benefit of reducing TV, below should
8966 be increased if BL can actually be eliminated, but this means
8967 we might have to do a topological sort of the order in which
8968 we try to process biv. It doesn't seem worthwhile to do
8969 this sort of thing now. */
8971 #if 0
8972 /* Otherwise the reg compared with had better be a biv. */
8973 if (GET_CODE (arg) != REG
8974 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8975 return 0;
8977 /* Look for a pair of givs, one for each biv,
8978 with identical coefficients. */
8979 for (v = bl->giv; v; v = v->next_iv)
8981 struct induction *tv;
8983 if (v->ignore || v->maybe_dead || v->mode != mode)
8984 continue;
8986 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8987 tv = tv->next_iv)
8988 if (! tv->ignore && ! tv->maybe_dead
8989 && rtx_equal_p (tv->mult_val, v->mult_val)
8990 && rtx_equal_p (tv->add_val, v->add_val)
8991 && tv->mode == mode)
8993 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8994 continue;
8996 if (! eliminate_p)
8997 return 1;
8999 /* Replace biv with its giv's reduced reg. */
9000 XEXP (x, 1 - arg_operand) = v->new_reg;
9001 /* Replace other operand with the other giv's
9002 reduced reg. */
9003 XEXP (x, arg_operand) = tv->new_reg;
9004 return 1;
9007 #endif
9010 /* If we get here, the biv can't be eliminated. */
9011 return 0;
9013 case MEM:
9014 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9015 biv is used in it, since it will be replaced. */
9016 for (v = bl->giv; v; v = v->next_iv)
9017 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9018 return 1;
9019 break;
9021 default:
9022 break;
9025 /* See if any subexpression fails elimination. */
9026 fmt = GET_RTX_FORMAT (code);
9027 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9029 switch (fmt[i])
9031 case 'e':
9032 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9033 eliminate_p, where_bb, where_insn))
9034 return 0;
9035 break;
9037 case 'E':
9038 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9039 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9040 eliminate_p, where_bb, where_insn))
9041 return 0;
9042 break;
9046 return 1;
9049 /* Return nonzero if the last use of REG
9050 is in an insn following INSN in the same basic block. */
9052 static int
9053 last_use_this_basic_block (rtx reg, rtx insn)
9055 rtx n;
9056 for (n = insn;
9057 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9058 n = NEXT_INSN (n))
9060 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9061 return 1;
9063 return 0;
9066 /* Called via `note_stores' to record the initial value of a biv. Here we
9067 just record the location of the set and process it later. */
9069 static void
9070 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9072 struct loop_ivs *ivs = (struct loop_ivs *) data;
9073 struct iv_class *bl;
9075 if (GET_CODE (dest) != REG
9076 || REGNO (dest) >= ivs->n_regs
9077 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9078 return;
9080 bl = REG_IV_CLASS (ivs, REGNO (dest));
9082 /* If this is the first set found, record it. */
9083 if (bl->init_insn == 0)
9085 bl->init_insn = note_insn;
9086 bl->init_set = set;
9090 /* If any of the registers in X are "old" and currently have a last use earlier
9091 than INSN, update them to have a last use of INSN. Their actual last use
9092 will be the previous insn but it will not have a valid uid_luid so we can't
9093 use it. X must be a source expression only. */
9095 static void
9096 update_reg_last_use (rtx x, rtx insn)
9098 /* Check for the case where INSN does not have a valid luid. In this case,
9099 there is no need to modify the regno_last_uid, as this can only happen
9100 when code is inserted after the loop_end to set a pseudo's final value,
9101 and hence this insn will never be the last use of x.
9102 ???? This comment is not correct. See for example loop_givs_reduce.
9103 This may insert an insn before another new insn. */
9104 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9105 && INSN_UID (insn) < max_uid_for_loop
9106 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9108 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9110 else
9112 int i, j;
9113 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9114 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9116 if (fmt[i] == 'e')
9117 update_reg_last_use (XEXP (x, i), insn);
9118 else if (fmt[i] == 'E')
9119 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9120 update_reg_last_use (XVECEXP (x, i, j), insn);
9125 /* Given an insn INSN and condition COND, return the condition in a
9126 canonical form to simplify testing by callers. Specifically:
9128 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9129 (2) Both operands will be machine operands; (cc0) will have been replaced.
9130 (3) If an operand is a constant, it will be the second operand.
9131 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9132 for GE, GEU, and LEU.
9134 If the condition cannot be understood, or is an inequality floating-point
9135 comparison which needs to be reversed, 0 will be returned.
9137 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9139 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9140 insn used in locating the condition was found. If a replacement test
9141 of the condition is desired, it should be placed in front of that
9142 insn and we will be sure that the inputs are still valid.
9144 If WANT_REG is nonzero, we wish the condition to be relative to that
9145 register, if possible. Therefore, do not canonicalize the condition
9146 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
9147 to be a compare to a CC mode register. */
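/* Two small examples of the rules above (illustrative):
   (le (reg X) (const_int 4)) is returned as (lt (reg X) (const_int 5)),
   per rule (4); and a condition written against (cc0) is traced back to the
   preceding compare insn so that both returned operands are machine
   operands, per rule (2).  */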
9149 rtx
9150 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9151 rtx want_reg, int allow_cc_mode)
9153 enum rtx_code code;
9154 rtx prev = insn;
9155 rtx set;
9156 rtx tem;
9157 rtx op0, op1;
9158 int reverse_code = 0;
9159 enum machine_mode mode;
9161 code = GET_CODE (cond);
9162 mode = GET_MODE (cond);
9163 op0 = XEXP (cond, 0);
9164 op1 = XEXP (cond, 1);
9166 if (reverse)
9167 code = reversed_comparison_code (cond, insn);
9168 if (code == UNKNOWN)
9169 return 0;
9171 if (earliest)
9172 *earliest = insn;
9174 /* If we are comparing a register with zero, see if the register is set
9175 in the previous insn to a COMPARE or a comparison operation. Perform
9176 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9177 in cse.c */
9179 while (GET_RTX_CLASS (code) == '<'
9180 && op1 == CONST0_RTX (GET_MODE (op0))
9181 && op0 != want_reg)
9183 /* Set nonzero when we find something of interest. */
9184 rtx x = 0;
9186 #ifdef HAVE_cc0
9187 /* If comparison with cc0, import actual comparison from compare
9188 insn. */
9189 if (op0 == cc0_rtx)
9191 if ((prev = prev_nonnote_insn (prev)) == 0
9192 || GET_CODE (prev) != INSN
9193 || (set = single_set (prev)) == 0
9194 || SET_DEST (set) != cc0_rtx)
9195 return 0;
9197 op0 = SET_SRC (set);
9198 op1 = CONST0_RTX (GET_MODE (op0));
9199 if (earliest)
9200 *earliest = prev;
9202 #endif
9204 /* If this is a COMPARE, pick up the two things being compared. */
9205 if (GET_CODE (op0) == COMPARE)
9207 op1 = XEXP (op0, 1);
9208 op0 = XEXP (op0, 0);
9209 continue;
9211 else if (GET_CODE (op0) != REG)
9212 break;
9214 /* Go back to the previous insn. Stop if it is not an INSN. We also
9215 stop if it isn't a single set or if it has a REG_INC note because
9216 we don't want to bother dealing with it. */
9218 if ((prev = prev_nonnote_insn (prev)) == 0
9219 || GET_CODE (prev) != INSN
9220 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9221 break;
9223 set = set_of (op0, prev);
9225 if (set
9226 && (GET_CODE (set) != SET
9227 || !rtx_equal_p (SET_DEST (set), op0)))
9228 break;
9230 /* If this is setting OP0, get what it sets it to if it looks
9231 relevant. */
9232 if (set)
9234 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9235 #ifdef FLOAT_STORE_FLAG_VALUE
9236 REAL_VALUE_TYPE fsfv;
9237 #endif
9239 /* ??? We may not combine comparisons done in a CCmode with
9240 comparisons not done in a CCmode. This is to aid targets
9241 like Alpha that have an IEEE compliant EQ instruction, and
9242 a non-IEEE compliant BEQ instruction. The use of CCmode is
9243 actually artificial, simply to prevent the combination, but
9244 should not affect other platforms.
9246 However, we must allow VOIDmode comparisons to match either
9247 CCmode or non-CCmode comparison, because some ports have
9248 modeless comparisons inside branch patterns.
9250 ??? This mode check should perhaps look more like the mode check
9251 in simplify_comparison in combine. */
9253 if ((GET_CODE (SET_SRC (set)) == COMPARE
9254 || (((code == NE
9255 || (code == LT
9256 && GET_MODE_CLASS (inner_mode) == MODE_INT
9257 && (GET_MODE_BITSIZE (inner_mode)
9258 <= HOST_BITS_PER_WIDE_INT)
9259 && (STORE_FLAG_VALUE
9260 & ((HOST_WIDE_INT) 1
9261 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9262 #ifdef FLOAT_STORE_FLAG_VALUE
9263 || (code == LT
9264 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9265 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9266 REAL_VALUE_NEGATIVE (fsfv)))
9267 #endif
9269 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9270 && (((GET_MODE_CLASS (mode) == MODE_CC)
9271 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9272 || mode == VOIDmode || inner_mode == VOIDmode))
9273 x = SET_SRC (set);
9274 else if (((code == EQ
9275 || (code == GE
9276 && (GET_MODE_BITSIZE (inner_mode)
9277 <= HOST_BITS_PER_WIDE_INT)
9278 && GET_MODE_CLASS (inner_mode) == MODE_INT
9279 && (STORE_FLAG_VALUE
9280 & ((HOST_WIDE_INT) 1
9281 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9282 #ifdef FLOAT_STORE_FLAG_VALUE
9283 || (code == GE
9284 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9285 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9286 REAL_VALUE_NEGATIVE (fsfv)))
9287 #endif
9289 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9290 && (((GET_MODE_CLASS (mode) == MODE_CC)
9291 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9292 || mode == VOIDmode || inner_mode == VOIDmode))
9295 reverse_code = 1;
9296 x = SET_SRC (set);
9298 else
9299 break;
9302 else if (reg_set_p (op0, prev))
9303 /* If this sets OP0, but not directly, we have to give up. */
9304 break;
9306 if (x)
9308 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9309 code = GET_CODE (x);
9310 if (reverse_code)
9312 code = reversed_comparison_code (x, prev);
9313 if (code == UNKNOWN)
9314 return 0;
9315 reverse_code = 0;
9318 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9319 if (earliest)
9320 *earliest = prev;
9324 /* If constant is first, put it last. */
9325 if (CONSTANT_P (op0))
9326 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9328 /* If OP0 is the result of a comparison, we weren't able to find what
9329 was really being compared, so fail. */
9330 if (!allow_cc_mode
9331 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9332 return 0;
9334 /* Canonicalize any ordered comparison with integers involving equality
9335 if we can do computations in the relevant mode and we do not
9336 overflow. */
9338 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
9339 && GET_CODE (op1) == CONST_INT
9340 && GET_MODE (op0) != VOIDmode
9341 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9343 HOST_WIDE_INT const_val = INTVAL (op1);
9344 unsigned HOST_WIDE_INT uconst_val = const_val;
9345 unsigned HOST_WIDE_INT max_val
9346 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9348 switch (code)
9350 case LE:
9351 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9352 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9353 break;
9355 /* When cross-compiling, const_val might be sign-extended from
9356 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9357 case GE:
9358 if ((HOST_WIDE_INT) (const_val & max_val)
9359 != (((HOST_WIDE_INT) 1
9360 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9361 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9362 break;
9364 case LEU:
9365 if (uconst_val < max_val)
9366 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9367 break;
9369 case GEU:
9370 if (uconst_val != 0)
9371 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9372 break;
9374 default:
9375 break;
9379 /* Never return CC0; return zero instead. */
9380 if (CC0_P (op0))
9381 return 0;
9383 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
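/* Illustrative examples of the canonical form produced above (the operand
   rtxes are hypothetical):

     (le  (reg 70) (const_int 4))    becomes   (lt  (reg 70) (const_int 5))
     (geu (reg 70) (const_int 4))    becomes   (gtu (reg 70) (const_int 3))
     (eq  (const_int 0) (reg 70))    becomes   (eq  (reg 70) (const_int 0))

   In each case any constant operand ends up second, per rules (3) and (4)
   in the comment before the function.  */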
9386 /* Given a jump insn JUMP, return the condition that will cause it to branch
9387 to its JUMP_LABEL. If the condition cannot be understood, or is an
9388 inequality floating-point comparison which needs to be reversed, 0 will
9389 be returned.
9391 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9392 insn used in locating the condition was found. If a replacement test
9393 of the condition is desired, it should be placed in front of that
9394 insn and we will be sure that the inputs are still valid.
9396 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
9397 compare to a CC mode register. */
9399 rtx
9400 get_condition (rtx jump, rtx *earliest, int allow_cc_mode)
9402 rtx cond;
9403 int reverse;
9404 rtx set;
9406 /* If this is not a standard conditional jump, we can't parse it. */
9407 if (GET_CODE (jump) != JUMP_INSN
9408 || ! any_condjump_p (jump))
9409 return 0;
9410 set = pc_set (jump);
9412 cond = XEXP (SET_SRC (set), 0);
9414 /* If this branches to JUMP_LABEL when the condition is false, reverse
9415 the condition. */
9416 reverse
9417 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9418 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9420 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
9421 allow_cc_mode);
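/* Illustrative example (hypothetical RTL): for a conditional jump whose
   pattern is

     (set (pc) (if_then_else (lt (reg 70) (const_int 10))
                             (label_ref 23)
                             (pc)))

   the condition handed to canonicalize_condition is
   (lt (reg 70) (const_int 10)).  If the label_ref instead appears as the
   third operand of the if_then_else, the branch is taken when the
   condition is false, so REVERSE is set and the condition is inverted
   before canonicalization.  */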
9424 /* Similar to the above routine, except that we also put an invariant last
9425 unless both operands are invariants. */
9427 static rtx
9428 get_condition_for_loop (const struct loop *loop, rtx x)
9430 rtx comparison = get_condition (x, (rtx*) 0, false);
9432 if (comparison == 0
9433 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9434 || loop_invariant_p (loop, XEXP (comparison, 1)))
9435 return comparison;
9437 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9438 XEXP (comparison, 1), XEXP (comparison, 0));
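/* Example (hypothetical registers): if the loop exit test comes back as
   (gt (reg invariant) (reg biv)) and only the first operand is loop
   invariant, the code above returns the swapped comparison
   (lt (reg biv) (reg invariant)), putting the invariant operand last.  */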
9441 /* Scan the function and determine whether it has indirect (computed) jumps.
9443 This is taken mostly from flow.c; similar code exists elsewhere
9444 in the compiler. It may be useful to put this into rtlanal.c. */
9445 static int
9446 indirect_jump_in_function_p (rtx start)
9448 rtx insn;
9450 for (insn = start; insn; insn = NEXT_INSN (insn))
9451 if (computed_jump_p (insn))
9452 return 1;
9454 return 0;
9457 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9458 documentation for LOOP_MEMS for the definition of `appropriate'.
9459 This function is called from prescan_loop via for_each_rtx. */
9461 static int
9462 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9464 struct loop_info *loop_info = data;
9465 int i;
9466 rtx m = *mem;
9468 if (m == NULL_RTX)
9469 return 0;
9471 switch (GET_CODE (m))
9473 case MEM:
9474 break;
9476 case CLOBBER:
9477 /* We're not interested in MEMs that are only clobbered. */
9478 return -1;
9480 case CONST_DOUBLE:
9481 /* We're not interested in the MEM associated with a
9482 CONST_DOUBLE, so there's no need to traverse into this. */
9483 return -1;
9485 case EXPR_LIST:
9486 /* We're not interested in any MEMs that only appear in notes. */
9487 return -1;
9489 default:
9490 /* This is not a MEM. */
9491 return 0;
9494 /* See if we've already seen this MEM. */
9495 for (i = 0; i < loop_info->mems_idx; ++i)
9496 if (rtx_equal_p (m, loop_info->mems[i].mem))
9498 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9499 /* The modes of the two memory accesses are different. If
9500 this happens, something tricky is going on, and we just
9501 don't optimize accesses to this MEM. */
9502 loop_info->mems[i].optimize = 0;
9504 return 0;
9507 /* Resize the array, if necessary. */
9508 if (loop_info->mems_idx == loop_info->mems_allocated)
9510 if (loop_info->mems_allocated != 0)
9511 loop_info->mems_allocated *= 2;
9512 else
9513 loop_info->mems_allocated = 32;
9515 loop_info->mems = xrealloc (loop_info->mems,
9516 loop_info->mems_allocated * sizeof (loop_mem_info));
9519 /* Actually insert the MEM. */
9520 loop_info->mems[loop_info->mems_idx].mem = m;
9521 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9522 because we can't put it in a register. We still store it in the
9523 table, though, so that if we see the same address later, but in a
9524 non-BLK mode, we'll not think we can optimize it at that point. */
9525 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9526 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9527 ++loop_info->mems_idx;
9529 return 0;
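/* A small worked note on the growth policy above: the mems array starts
   with room for 32 entries and doubles each time it fills (32, 64, 128,
   ...), so the xrealloc cost stays amortized-constant per recorded MEM.  */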
9533 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9535 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9536 register that is modified by an insn between FROM and TO. If the
9537 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9538 more, stop incrementing it, to avoid overflow.
9540 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9541 register I is used, if it is only used once. Otherwise, it is set
9542 to 0 (for no uses) or const0_rtx for more than one use. This
9543 parameter may be zero, in which case this processing is not done.
9545 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9546 optimize register I. */
9548 static void
9549 loop_regs_scan (const struct loop *loop, int extra_size)
9551 struct loop_regs *regs = LOOP_REGS (loop);
9552 int old_nregs;
9553 /* last_set[n] is nonzero iff reg n has been set in the current
9554 basic block. In that case, it is the insn that last set reg n. */
9555 rtx *last_set;
9556 rtx insn;
9557 int i;
9559 old_nregs = regs->num;
9560 regs->num = max_reg_num ();
9562 /* Grow the regs array if not allocated or too small. */
9563 if (regs->num >= regs->size)
9565 regs->size = regs->num + extra_size;
9567 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
9569 /* Zero the new elements. */
9570 memset (regs->array + old_nregs, 0,
9571 (regs->size - old_nregs) * sizeof (*regs->array));
9574 /* Clear previously scanned fields but do not clear n_times_set. */
9575 for (i = 0; i < old_nregs; i++)
9577 regs->array[i].set_in_loop = 0;
9578 regs->array[i].may_not_optimize = 0;
9579 regs->array[i].single_usage = NULL_RTX;
9582 last_set = xcalloc (regs->num, sizeof (rtx));
9584 /* Scan the loop, recording register usage. */
9585 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9586 insn = NEXT_INSN (insn))
9588 if (INSN_P (insn))
9590 /* Record registers that have exactly one use. */
9591 find_single_use_in_loop (regs, insn, PATTERN (insn));
9593 /* Include uses in REG_EQUAL notes. */
9594 if (REG_NOTES (insn))
9595 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9597 if (GET_CODE (PATTERN (insn)) == SET
9598 || GET_CODE (PATTERN (insn)) == CLOBBER)
9599 count_one_set (regs, insn, PATTERN (insn), last_set);
9600 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9602 int i;
9603 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9604 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9605 last_set);
9609 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9610 memset (last_set, 0, regs->num * sizeof (rtx));
9612 /* Invalidate all registers used for function argument passing.
9613 We check rtx_varies_p for the same reason as below, to allow
9614 optimizing PIC calculations. */
9615 if (GET_CODE (insn) == CALL_INSN)
9617 rtx link;
9618 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9619 link;
9620 link = XEXP (link, 1))
9622 rtx op, reg;
9624 if (GET_CODE (op = XEXP (link, 0)) == USE
9625 && GET_CODE (reg = XEXP (op, 0)) == REG
9626 && rtx_varies_p (reg, 1))
9627 regs->array[REGNO (reg)].may_not_optimize = 1;
9632 /* Invalidate all hard registers clobbered by calls. With one exception:
9633 a call-clobbered PIC register is still function-invariant for our
9634 purposes, since we can hoist any PIC calculations out of the loop.
9635 Thus the call to rtx_varies_p. */
9636 if (LOOP_INFO (loop)->has_call)
9637 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9638 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9639 && rtx_varies_p (regno_reg_rtx[i], 1))
9641 regs->array[i].may_not_optimize = 1;
9642 regs->array[i].set_in_loop = 1;
9645 #ifdef AVOID_CCMODE_COPIES
9646 /* Don't try to move insns which set CC registers if we should not
9647 create CCmode register copies. */
9648 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9649 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9650 regs->array[i].may_not_optimize = 1;
9651 #endif
9653 /* Set regs->array[I].n_times_set for the new registers. */
9654 for (i = old_nregs; i < regs->num; i++)
9655 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9657 free (last_set);
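/* Worked example of the bookkeeping above (register numbers are
   hypothetical): if pseudo 42 is used by exactly one insn I in the loop,
   regs->array[42].single_usage is I after the scan; a second use turns it
   into const0_rtx, and a register with no uses leaves it 0.  Similarly
   set_in_loop counts the sets of each register but saturates at 127 so
   that the counter cannot overflow.  */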
9660 /* Returns the number of real INSNs in the LOOP. */
9662 static int
9663 count_insns_in_loop (const struct loop *loop)
9665 int count = 0;
9666 rtx insn;
9668 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9669 insn = NEXT_INSN (insn))
9670 if (INSN_P (insn))
9671 ++count;
9673 return count;
9676 /* Move MEMs into registers for the duration of the loop. */
9678 static void
9679 load_mems (const struct loop *loop)
9681 struct loop_info *loop_info = LOOP_INFO (loop);
9682 struct loop_regs *regs = LOOP_REGS (loop);
9683 int maybe_never = 0;
9684 int i;
9685 rtx p, prev_ebb_head;
9686 rtx label = NULL_RTX;
9687 rtx end_label;
9688 /* Nonzero if the next instruction may never be executed. */
9689 int next_maybe_never = 0;
9690 unsigned int last_max_reg = max_reg_num ();
9692 if (loop_info->mems_idx == 0)
9693 return;
9695 /* We cannot use next_label here because it skips over normal insns. */
9696 end_label = next_nonnote_insn (loop->end);
9697 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9698 end_label = NULL_RTX;
9700 /* Check to see if it's possible that some instructions in the loop are
9701 never executed. Also check if there is a goto out of the loop other
9702 than right after the end of the loop. */
9703 for (p = next_insn_in_loop (loop, loop->scan_start);
9704 p != NULL_RTX;
9705 p = next_insn_in_loop (loop, p))
9707 if (GET_CODE (p) == CODE_LABEL)
9708 maybe_never = 1;
9709 else if (GET_CODE (p) == JUMP_INSN
9710 /* If we enter the loop in the middle, and scan
9711 around to the beginning, don't set maybe_never
9712 for that. This must be an unconditional jump,
9713 otherwise the code at the top of the loop might
9714 never be executed. Unconditional jumps are
9715 followed by a barrier and then the loop end.
9716 && ! (GET_CODE (p) == JUMP_INSN
9717 && JUMP_LABEL (p) == loop->top
9718 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9719 && any_uncondjump_p (p)))
9721 /* If this is a jump outside of the loop but not right
9722 after the end of the loop, we would have to emit new fixup
9723 sequences for each such label. */
9724 if (/* If we can't tell where control might go when this
9725 JUMP_INSN is executed, we must be conservative. */
9726 !JUMP_LABEL (p)
9727 || (JUMP_LABEL (p) != end_label
9728 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9729 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9730 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9731 return;
9733 if (!any_condjump_p (p))
9734 /* Something complicated. */
9735 maybe_never = 1;
9736 else
9737 /* If there are any more instructions in the loop, they
9738 might not be reached. */
9739 next_maybe_never = 1;
9741 else if (next_maybe_never)
9742 maybe_never = 1;
9745 /* Find start of the extended basic block that enters the loop. */
9746 for (p = loop->start;
9747 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9748 p = PREV_INSN (p))
9750 prev_ebb_head = p;
9752 cselib_init ();
9754 /* Build table of mems that get set to constant values before the
9755 loop. */
9756 for (; p != loop->start; p = NEXT_INSN (p))
9757 cselib_process_insn (p);
9759 /* Actually move the MEMs. */
9760 for (i = 0; i < loop_info->mems_idx; ++i)
9762 regset_head load_copies;
9763 regset_head store_copies;
9764 int written = 0;
9765 rtx reg;
9766 rtx mem = loop_info->mems[i].mem;
9767 rtx mem_list_entry;
9769 if (MEM_VOLATILE_P (mem)
9770 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9771 /* There's no telling whether or not MEM is modified. */
9772 loop_info->mems[i].optimize = 0;
9774 /* Go through the MEMs written to in the loop to see if this
9775 one is aliased by one of them. */
9776 mem_list_entry = loop_info->store_mems;
9777 while (mem_list_entry)
9779 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9780 written = 1;
9781 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9782 mem, rtx_varies_p))
9784 /* MEM is indeed aliased by this store. */
9785 loop_info->mems[i].optimize = 0;
9786 break;
9788 mem_list_entry = XEXP (mem_list_entry, 1);
9791 if (flag_float_store && written
9792 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9793 loop_info->mems[i].optimize = 0;
9795 /* If this MEM is written to, we must be sure that there
9796 are no reads from another MEM that aliases this one. */
9797 if (loop_info->mems[i].optimize && written)
9799 int j;
9801 for (j = 0; j < loop_info->mems_idx; ++j)
9803 if (j == i)
9804 continue;
9805 else if (true_dependence (mem,
9806 VOIDmode,
9807 loop_info->mems[j].mem,
9808 rtx_varies_p))
9810 /* It's not safe to hoist loop_info->mems[i] out of
9811 the loop because writes to it might not be
9812 seen by reads from loop_info->mems[j]. */
9813 loop_info->mems[i].optimize = 0;
9814 break;
9819 if (maybe_never && may_trap_p (mem))
9820 /* We can't access the MEM outside the loop; it might
9821 cause a trap that wouldn't have happened otherwise. */
9822 loop_info->mems[i].optimize = 0;
9824 if (!loop_info->mems[i].optimize)
9825 /* We thought we were going to lift this MEM out of the
9826 loop, but later discovered that we could not. */
9827 continue;
9829 INIT_REG_SET (&load_copies);
9830 INIT_REG_SET (&store_copies);
9832 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9833 order to keep scan_loop from moving stores to this MEM
9834 out of the loop just because this REG is neither a
9835 user-variable nor used in the loop test. */
9836 reg = gen_reg_rtx (GET_MODE (mem));
9837 REG_USERVAR_P (reg) = 1;
9838 loop_info->mems[i].reg = reg;
9840 /* Now, replace all references to the MEM with the
9841 corresponding pseudos. */
9842 maybe_never = 0;
9843 for (p = next_insn_in_loop (loop, loop->scan_start);
9844 p != NULL_RTX;
9845 p = next_insn_in_loop (loop, p))
9847 if (INSN_P (p))
9849 rtx set;
9851 set = single_set (p);
9853 /* See if this copies the mem into a register that isn't
9854 modified afterwards. We'll try to do copy propagation
9855 a little further on. */
9856 if (set
9857 /* @@@ This test is _way_ too conservative. */
9858 && ! maybe_never
9859 && GET_CODE (SET_DEST (set)) == REG
9860 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9861 && REGNO (SET_DEST (set)) < last_max_reg
9862 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9863 && rtx_equal_p (SET_SRC (set), mem))
9864 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9866 /* See if this copies the mem from a register that isn't
9867 modified afterwards. We'll try to remove the
9868 redundant copy later on by doing a little register
9869 renaming and copy propagation. This will help
9870 to untangle things for the BIV detection code. */
9871 if (set
9872 && ! maybe_never
9873 && GET_CODE (SET_SRC (set)) == REG
9874 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9875 && REGNO (SET_SRC (set)) < last_max_reg
9876 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9877 && rtx_equal_p (SET_DEST (set), mem))
9878 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9880 /* If this is a call which uses / clobbers this memory
9881 location, we must not change the interface here. */
9882 if (GET_CODE (p) == CALL_INSN
9883 && reg_mentioned_p (loop_info->mems[i].mem,
9884 CALL_INSN_FUNCTION_USAGE (p)))
9886 cancel_changes (0);
9887 loop_info->mems[i].optimize = 0;
9888 break;
9890 else
9891 /* Replace the memory reference with the shadow register. */
9892 replace_loop_mems (p, loop_info->mems[i].mem,
9893 loop_info->mems[i].reg, written);
9896 if (GET_CODE (p) == CODE_LABEL
9897 || GET_CODE (p) == JUMP_INSN)
9898 maybe_never = 1;
9901 if (! loop_info->mems[i].optimize)
9902 ; /* We found we couldn't do the replacement, so do nothing. */
9903 else if (! apply_change_group ())
9904 /* We couldn't replace all occurrences of the MEM. */
9905 loop_info->mems[i].optimize = 0;
9906 else
9908 /* Load the memory immediately before LOOP->START, which is
9909 the NOTE_LOOP_BEG. */
9910 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9911 rtx set;
9912 rtx best = mem;
9913 int j;
9914 struct elt_loc_list *const_equiv = 0;
9916 if (e)
9918 struct elt_loc_list *equiv;
9919 struct elt_loc_list *best_equiv = 0;
9920 for (equiv = e->locs; equiv; equiv = equiv->next)
9922 if (CONSTANT_P (equiv->loc))
9923 const_equiv = equiv;
9924 else if (GET_CODE (equiv->loc) == REG
9925 /* Extending hard register lifetimes causes a crash
9926 on SRC targets. Doing so on non-SRC targets is
9927 probably also not a good idea, since we most
9928 probably have a pseudoregister equivalence as
9929 well. */
9930 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9931 best_equiv = equiv;
9933 /* Use the constant equivalence if that is cheap enough. */
9934 if (! best_equiv)
9935 best_equiv = const_equiv;
9936 else if (const_equiv
9937 && (rtx_cost (const_equiv->loc, SET)
9938 <= rtx_cost (best_equiv->loc, SET)))
9940 best_equiv = const_equiv;
9941 const_equiv = 0;
9944 /* If best_equiv is nonzero, we know that MEM is set to a
9945 constant or register before the loop. We will use this
9946 knowledge to initialize the shadow register with that
9947 constant or reg rather than by loading from MEM. */
9948 if (best_equiv)
9949 best = copy_rtx (best_equiv->loc);
9952 set = gen_move_insn (reg, best);
9953 set = loop_insn_hoist (loop, set);
9954 if (REG_P (best))
9956 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9957 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9959 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9960 break;
9964 if (const_equiv)
9965 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9967 if (written)
9969 if (label == NULL_RTX)
9971 label = gen_label_rtx ();
9972 emit_label_after (label, loop->end);
9975 /* Store the memory immediately after END, which is
9976 the NOTE_LOOP_END. */
9977 set = gen_move_insn (copy_rtx (mem), reg);
9978 loop_insn_emit_after (loop, 0, label, set);
9981 if (loop_dump_stream)
9983 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9984 REGNO (reg), (written ? "r/w" : "r/o"));
9985 print_rtl (loop_dump_stream, mem);
9986 fputc ('\n', loop_dump_stream);
9989 /* Attempt a bit of copy propagation. This helps untangle the
9990 data flow, and enables {basic,general}_induction_var to find
9991 more bivs/givs. */
9992 EXECUTE_IF_SET_IN_REG_SET
9993 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9995 try_copy_prop (loop, reg, j);
9997 CLEAR_REG_SET (&load_copies);
9999 EXECUTE_IF_SET_IN_REG_SET
10000 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10002 try_swap_copy_prop (loop, reg, j);
10004 CLEAR_REG_SET (&store_copies);
10008 /* Now, we need to replace all references to the previous exit
10009 label with the new one. */
10010 if (label != NULL_RTX && end_label != NULL_RTX)
10011 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10012 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10013 redirect_jump (p, label, false);
10015 cselib_finish ();
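/* Sketch of the overall transformation performed by load_mems
   (hypothetical RTL, assuming a single MEM whose address is loop
   invariant and which cannot trap):

     before:  the loop body reads and writes (mem:SI (reg/f 60))

     after:   (set (reg:SI 105) (mem:SI (reg/f 60)))    ;; load hoisted
                                                        ;; before the loop
              ... loop body now uses (reg:SI 105) ...
              (set (mem:SI (reg/f 60)) (reg:SI 105))    ;; store emitted
                                                        ;; after the loop,
                                                        ;; only if written

   Register 105 stands for the shadow pseudo allocated with gen_reg_rtx
   above.  */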
10018 /* For communication between note_reg_stored and its caller. */
10019 struct note_reg_stored_arg
10021 int set_seen;
10022 rtx reg;
10025 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10026 is equal to ARG. */
10027 static void
10028 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
10030 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10031 if (t->reg == x)
10032 t->set_seen = 1;
10035 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10036 There must be exactly one insn that sets this pseudo; it will be
10037 deleted if all replacements succeed and we can prove that the register
10038 is not used after the loop. */
10040 static void
10041 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10043 /* This is the reg that we are copying from. */
10044 rtx reg_rtx = regno_reg_rtx[regno];
10045 rtx init_insn = 0;
10046 rtx insn;
10047 /* These help keep track of whether we replaced all uses of the reg. */
10048 int replaced_last = 0;
10049 int store_is_first = 0;
10051 for (insn = next_insn_in_loop (loop, loop->scan_start);
10052 insn != NULL_RTX;
10053 insn = next_insn_in_loop (loop, insn))
10055 rtx set;
10057 /* Only substitute within one extended basic block from the initializing
10058 insn. */
10059 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10060 break;
10062 if (! INSN_P (insn))
10063 continue;
10065 /* Is this the initializing insn? */
10066 set = single_set (insn);
10067 if (set
10068 && GET_CODE (SET_DEST (set)) == REG
10069 && REGNO (SET_DEST (set)) == regno)
10071 if (init_insn)
10072 abort ();
10074 init_insn = insn;
10075 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10076 store_is_first = 1;
10079 /* Only substitute after seeing the initializing insn. */
10080 if (init_insn && insn != init_insn)
10082 struct note_reg_stored_arg arg;
10084 replace_loop_regs (insn, reg_rtx, replacement);
10085 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10086 replaced_last = 1;
10088 /* Stop replacing when REPLACEMENT is modified. */
10089 arg.reg = replacement;
10090 arg.set_seen = 0;
10091 note_stores (PATTERN (insn), note_reg_stored, &arg);
10092 if (arg.set_seen)
10094 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10096 /* It is possible that we've turned a previously valid REG_EQUAL note
10097 into an invalid one: we replaced REGNO with REPLACEMENT, and unlike
10098 REGNO, REPLACEMENT is modified here, so the note's meaning changes. */
10099 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10100 remove_note (insn, note);
10101 break;
10105 if (! init_insn)
10106 abort ();
10107 if (apply_change_group ())
10109 if (loop_dump_stream)
10110 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10111 if (store_is_first && replaced_last)
10113 rtx first;
10114 rtx retval_note;
10116 /* Assume we're just deleting INIT_INSN. */
10117 first = init_insn;
10118 /* Look for REG_RETVAL note. If we're deleting the end of
10119 the libcall sequence, the whole sequence can go. */
10120 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10121 /* If we found a REG_RETVAL note, find the first instruction
10122 in the sequence. */
10123 if (retval_note)
10124 first = XEXP (retval_note, 0);
10126 /* Delete the instructions. */
10127 loop_delete_insns (first, init_insn);
10129 if (loop_dump_stream)
10130 fprintf (loop_dump_stream, ".\n");
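/* Illustrative example (hypothetical pseudos): after load_mems has
   rewritten a copy insn into  (set (reg 80) (reg 105)),  calling
   try_copy_prop with REPLACEMENT = (reg 105) and REGNO = 80 replaces the
   remaining uses of reg 80 within the extended basic block with reg 105
   and, if every use was replaced and reg 80 is provably unused after the
   loop, deletes the copy insn itself.  */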
10134 /* Replace all the instructions from FIRST up to and including LAST
10135 with NOTE_INSN_DELETED notes. */
10137 static void
10138 loop_delete_insns (rtx first, rtx last)
10140 while (1)
10142 if (loop_dump_stream)
10143 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10144 INSN_UID (first));
10145 delete_insn (first);
10147 /* If this was the LAST instruction we're supposed to delete,
10148 we're done. */
10149 if (first == last)
10150 break;
10152 first = NEXT_INSN (first);
10156 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10157 loop LOOP if the order of the sets of these registers can be
10158 swapped. There must be exactly one insn within the loop that sets
10159 this pseudo followed immediately by a move insn that sets
10160 REPLACEMENT with REGNO. */
10161 static void
10162 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10163 unsigned int regno)
10165 rtx insn;
10166 rtx set = NULL_RTX;
10167 unsigned int new_regno;
10169 new_regno = REGNO (replacement);
10171 for (insn = next_insn_in_loop (loop, loop->scan_start);
10172 insn != NULL_RTX;
10173 insn = next_insn_in_loop (loop, insn))
10175 /* Search for the insn that copies REGNO to NEW_REGNO. */
10176 if (INSN_P (insn)
10177 && (set = single_set (insn))
10178 && GET_CODE (SET_DEST (set)) == REG
10179 && REGNO (SET_DEST (set)) == new_regno
10180 && GET_CODE (SET_SRC (set)) == REG
10181 && REGNO (SET_SRC (set)) == regno)
10182 break;
10185 if (insn != NULL_RTX)
10187 rtx prev_insn;
10188 rtx prev_set;
10190 /* Some DEF-USE info would come in handy here to make this
10191 function more general. For now, just check the previous insn
10192 which is the most likely candidate for setting REGNO. */
10194 prev_insn = PREV_INSN (insn);
10196 if (INSN_P (insn)
10197 && (prev_set = single_set (prev_insn))
10198 && GET_CODE (SET_DEST (prev_set)) == REG
10199 && REGNO (SET_DEST (prev_set)) == regno)
10201 /* We have:
10202 (set (reg regno) (expr))
10203 (set (reg new_regno) (reg regno))
10205 so try converting this to:
10206 (set (reg new_regno) (expr))
10207 (set (reg regno) (reg new_regno))
10209 The former construct is often generated when a global
10210 variable used for an induction variable is shadowed by a
10211 register (NEW_REGNO). The latter construct improves the
10212 chances of GIV replacement and BIV elimination. */
10214 validate_change (prev_insn, &SET_DEST (prev_set),
10215 replacement, 1);
10216 validate_change (insn, &SET_DEST (set),
10217 SET_SRC (set), 1);
10218 validate_change (insn, &SET_SRC (set),
10219 replacement, 1);
10221 if (apply_change_group ())
10223 if (loop_dump_stream)
10224 fprintf (loop_dump_stream,
10225 " Swapped set of reg %d at %d with reg %d at %d.\n",
10226 regno, INSN_UID (insn),
10227 new_regno, INSN_UID (prev_insn));
10229 /* Update first use of REGNO. */
10230 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10231 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10233 /* Now perform copy propagation to hopefully
10234 remove all uses of REGNO within the loop. */
10235 try_copy_prop (loop, replacement, regno);
10241 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10243 static int
10244 find_mem_in_note_1 (rtx *x, void *data)
10246 if (*x != NULL_RTX && GET_CODE (*x) == MEM)
10248 rtx *res = (rtx *) data;
10249 *res = *x;
10250 return 1;
10252 return 0;
10255 /* Returns the first MEM found in NOTE by depth-first search. */
10257 static rtx
10258 find_mem_in_note (rtx note)
10260 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10261 return note;
10262 return NULL_RTX;
10265 /* Replace MEM with its associated pseudo register. This function is
10266 called from load_mems via for_each_rtx. DATA is actually a pointer
10267 to a structure describing the instruction currently being scanned
10268 and the MEM we are currently replacing. */
10270 static int
10271 replace_loop_mem (rtx *mem, void *data)
10273 loop_replace_args *args = (loop_replace_args *) data;
10274 rtx m = *mem;
10276 if (m == NULL_RTX)
10277 return 0;
10279 switch (GET_CODE (m))
10281 case MEM:
10282 break;
10284 case CONST_DOUBLE:
10285 /* We're not interested in the MEM associated with a
10286 CONST_DOUBLE, so there's no need to traverse into one. */
10287 return -1;
10289 default:
10290 /* This is not a MEM. */
10291 return 0;
10294 if (!rtx_equal_p (args->match, m))
10295 /* This is not the MEM we are currently replacing. */
10296 return 0;
10298 /* Actually replace the MEM. */
10299 validate_change (args->insn, mem, args->replacement, 1);
10301 return 0;
10304 static void
10305 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10307 loop_replace_args args;
10309 args.insn = insn;
10310 args.match = mem;
10311 args.replacement = reg;
10313 for_each_rtx (&insn, replace_loop_mem, &args);
10315 /* If we hoist a mem write out of the loop, then REG_EQUAL
10316 notes referring to the mem are no longer valid. */
10317 if (written)
10319 rtx note, sub;
10320 rtx *link;
10322 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10324 if (REG_NOTE_KIND (note) == REG_EQUAL
10325 && (sub = find_mem_in_note (note))
10326 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10328 /* Remove the note. */
10329 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10330 break;
10336 /* Replace one register with another. Called through for_each_rtx; PX points
10337 to the rtx being scanned. DATA is actually a pointer to
10338 a structure of arguments. */
10340 static int
10341 replace_loop_reg (rtx *px, void *data)
10343 rtx x = *px;
10344 loop_replace_args *args = (loop_replace_args *) data;
10346 if (x == NULL_RTX)
10347 return 0;
10349 if (x == args->match)
10350 validate_change (args->insn, px, args->replacement, 1);
10352 return 0;
10355 static void
10356 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10358 loop_replace_args args;
10360 args.insn = insn;
10361 args.match = reg;
10362 args.replacement = replacement;
10364 for_each_rtx (&insn, replace_loop_reg, &args);
10367 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10368 (ignored in the interim). */
10370 static rtx
10371 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10372 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10373 rtx pattern)
10375 return emit_insn_after (pattern, where_insn);
10379 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10380 in basic block WHERE_BB (ignored in the interim) within the loop
10381 otherwise hoist PATTERN into the loop pre-header. */
10384 loop_insn_emit_before (const struct loop *loop,
10385 basic_block where_bb ATTRIBUTE_UNUSED,
10386 rtx where_insn, rtx pattern)
10388 if (! where_insn)
10389 return loop_insn_hoist (loop, pattern);
10390 return emit_insn_before (pattern, where_insn);
10394 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10395 WHERE_BB (ignored in the interim) within the loop. */
10397 static rtx
10398 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10399 basic_block where_bb ATTRIBUTE_UNUSED,
10400 rtx where_insn, rtx pattern)
10402 return emit_call_insn_before (pattern, where_insn);
10406 /* Hoist insn for PATTERN into the loop pre-header. */
10409 loop_insn_hoist (const struct loop *loop, rtx pattern)
10411 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10415 /* Hoist call insn for PATTERN into the loop pre-header. */
10417 static rtx
10418 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10420 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10424 /* Sink insn for PATTERN after the loop end. */
10427 loop_insn_sink (const struct loop *loop, rtx pattern)
10429 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10432 /* bl->final_value can be either general_operand or PLUS of general_operand
10433 and constant. Emit sequence of instructions to load it into REG. */
10434 static rtx
10435 gen_load_of_final_value (rtx reg, rtx final_value)
10437 rtx seq;
10438 start_sequence ();
10439 final_value = force_operand (final_value, reg);
10440 if (final_value != reg)
10441 emit_move_insn (reg, final_value);
10442 seq = get_insns ();
10443 end_sequence ();
10444 return seq;
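/* A usage sketch for the helper above (names are illustrative): to
   materialize a biv's final value after the loop, one would emit

     loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));

   which forces FINAL_VALUE into REG, emitting the addition first when the
   value is a PLUS of an operand and a constant.  */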
10447 /* If the loop has multiple exits, emit insn for PATTERN before the
10448 loop to ensure that it will always be executed no matter how the
10449 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10450 since this is slightly more efficient. */
10452 static rtx
10453 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
10455 if (loop->exit_count)
10456 return loop_insn_hoist (loop, pattern);
10457 else
10458 return loop_insn_sink (loop, pattern);
10461 static void
10462 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
10464 struct iv_class *bl;
10465 int iv_num = 0;
10467 if (! loop || ! file)
10468 return;
10470 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10471 iv_num++;
10473 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10475 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10477 loop_iv_class_dump (bl, file, verbose);
10478 fputc ('\n', file);
10483 static void
10484 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10485 int verbose ATTRIBUTE_UNUSED)
10487 struct induction *v;
10488 rtx incr;
10489 int i;
10491 if (! bl || ! file)
10492 return;
10494 fprintf (file, "IV class for reg %d, benefit %d\n",
10495 bl->regno, bl->total_benefit);
10497 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10498 if (bl->initial_value)
10500 fprintf (file, ", init val: ");
10501 print_simple_rtl (file, bl->initial_value);
10503 if (bl->initial_test)
10505 fprintf (file, ", init test: ");
10506 print_simple_rtl (file, bl->initial_test);
10508 fputc ('\n', file);
10510 if (bl->final_value)
10512 fprintf (file, " Final val: ");
10513 print_simple_rtl (file, bl->final_value);
10514 fputc ('\n', file);
10517 if ((incr = biv_total_increment (bl)))
10519 fprintf (file, " Total increment: ");
10520 print_simple_rtl (file, incr);
10521 fputc ('\n', file);
10524 /* List the increments. */
10525 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10527 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10528 print_simple_rtl (file, v->add_val);
10529 fputc ('\n', file);
10532 /* List the givs. */
10533 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10535 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10536 i, INSN_UID (v->insn), v->benefit);
10537 if (v->giv_type == DEST_ADDR)
10538 print_simple_rtl (file, v->mem);
10539 else
10540 print_simple_rtl (file, single_set (v->insn));
10541 fputc ('\n', file);
10546 static void
10547 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
10549 if (! v || ! file)
10550 return;
10552 fprintf (file,
10553 "Biv %d: insn %d",
10554 REGNO (v->dest_reg), INSN_UID (v->insn));
10555 fprintf (file, " const ");
10556 print_simple_rtl (file, v->add_val);
10558 if (verbose && v->final_value)
10560 fputc ('\n', file);
10561 fprintf (file, " final ");
10562 print_simple_rtl (file, v->final_value);
10565 fputc ('\n', file);
10569 static void
10570 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
10572 if (! v || ! file)
10573 return;
10575 if (v->giv_type == DEST_REG)
10576 fprintf (file, "Giv %d: insn %d",
10577 REGNO (v->dest_reg), INSN_UID (v->insn));
10578 else
10579 fprintf (file, "Dest address: insn %d",
10580 INSN_UID (v->insn));
10582 fprintf (file, " src reg %d benefit %d",
10583 REGNO (v->src_reg), v->benefit);
10584 fprintf (file, " lifetime %d",
10585 v->lifetime);
10587 if (v->replaceable)
10588 fprintf (file, " replaceable");
10590 if (v->no_const_addval)
10591 fprintf (file, " ncav");
10593 if (v->ext_dependent)
10595 switch (GET_CODE (v->ext_dependent))
10597 case SIGN_EXTEND:
10598 fprintf (file, " ext se");
10599 break;
10600 case ZERO_EXTEND:
10601 fprintf (file, " ext ze");
10602 break;
10603 case TRUNCATE:
10604 fprintf (file, " ext tr");
10605 break;
10606 default:
10607 abort ();
10611 fputc ('\n', file);
10612 fprintf (file, " mult ");
10613 print_simple_rtl (file, v->mult_val);
10615 fputc ('\n', file);
10616 fprintf (file, " add ");
10617 print_simple_rtl (file, v->add_val);
10619 if (verbose && v->final_value)
10621 fputc ('\n', file);
10622 fprintf (file, " final ");
10623 print_simple_rtl (file, v->final_value);
10626 fputc ('\n', file);
10630 void
10631 debug_ivs (const struct loop *loop)
10633 loop_ivs_dump (loop, stderr, 1);
10637 void
10638 debug_iv_class (const struct iv_class *bl)
10640 loop_iv_class_dump (bl, stderr, 1);
10644 void
10645 debug_biv (const struct induction *v)
10647 loop_biv_dump (v, stderr, 1);
10651 void
10652 debug_giv (const struct induction *v)
10654 loop_giv_dump (v, stderr, 1);
10658 #define LOOP_BLOCK_NUM_1(INSN) \
10659 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10661 /* The notes do not have an assigned block, so look at the next insn. */
10662 #define LOOP_BLOCK_NUM(INSN) \
10663 ((INSN) ? (GET_CODE (INSN) == NOTE \
10664 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10665 : LOOP_BLOCK_NUM_1 (INSN)) \
10666 : -1)
10668 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10670 static void
10671 loop_dump_aux (const struct loop *loop, FILE *file,
10672 int verbose ATTRIBUTE_UNUSED)
10674 rtx label;
10676 if (! loop || ! file)
10677 return;
10679 /* Print diagnostics to compare our concept of a loop with
10680 what the loop notes say. */
10681 if (! PREV_INSN (loop->first->head)
10682 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10683 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10684 != NOTE_INSN_LOOP_BEG)
10685 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10686 INSN_UID (PREV_INSN (loop->first->head)));
10687 if (! NEXT_INSN (loop->last->end)
10688 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10689 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10690 != NOTE_INSN_LOOP_END)
10691 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10692 INSN_UID (NEXT_INSN (loop->last->end)));
10694 if (loop->start)
10696 fprintf (file,
10697 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10698 LOOP_BLOCK_NUM (loop->start),
10699 LOOP_INSN_UID (loop->start),
10700 LOOP_BLOCK_NUM (loop->cont),
10701 LOOP_INSN_UID (loop->cont),
10702 LOOP_BLOCK_NUM (loop->cont),
10703 LOOP_INSN_UID (loop->cont),
10704 LOOP_BLOCK_NUM (loop->vtop),
10705 LOOP_INSN_UID (loop->vtop),
10706 LOOP_BLOCK_NUM (loop->end),
10707 LOOP_INSN_UID (loop->end));
10708 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10709 LOOP_BLOCK_NUM (loop->top),
10710 LOOP_INSN_UID (loop->top),
10711 LOOP_BLOCK_NUM (loop->scan_start),
10712 LOOP_INSN_UID (loop->scan_start));
10713 fprintf (file, ";; exit_count %d", loop->exit_count);
10714 if (loop->exit_count)
10716 fputs (", labels:", file);
10717 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10719 fprintf (file, " %d ",
10720 LOOP_INSN_UID (XEXP (label, 0)));
10723 fputs ("\n", file);
10725 /* This can happen when a marked loop appears as two nested loops,
10726 say from while (a || b) {}. The inner loop won't match
10727 the loop markers but the outer one will. */
10728 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10729 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10733 /* Call this function from the debugger to dump LOOP. */
10735 void
10736 debug_loop (const struct loop *loop)
10738 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10741 /* Call this function from the debugger to dump LOOPS. */
10743 void
10744 debug_loops (const struct loops *loops)
10746 flow_loops_dump (loops, stderr, loop_dump_aux, 1);