/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
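
/* An illustrative sketch (not part of the original sources): given a
   source loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y;

   the invariant product x * y is computed once before the loop:

	t = x * y;
	for (i = 0; i < n; i++)
	  a[i] = t;

   The pass performs such transformations on RTL, not on source code;
   this example only shows the intent.  */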
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100

/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6

/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth
   prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
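
/* For example (illustrative arithmetic only): with the default value of
   220, prefetching is attempted only when at least 220/256, roughly 86%,
   of the bytes in the fetched blocks are actually referenced by the
   loop.  */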
/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
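
/* Illustration (hypothetical target): on a 32-bit machine a DImode
   SET_DEST held in a hard register spans HARD_REGNO_NREGS == 2
   consecutive hard registers, while a pseudo register always counts
   as a single register here.  */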
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
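
/* To illustrate with hypothetical numbers: three consecutive insns in the
   stream might carry uids 7, 3, and 9 (uids reflect creation order, not
   position), but they would be assigned luids 1, 2, and 3, so comparing
   luids always reflects positions in the insn stream.  */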
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;
/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				   first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;
/* Forward declarations.  */

static void invalidate_loops_containing_label PARAMS ((rtx));
static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
					    rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
				      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
					 struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
				   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
				      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
					 int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
					   struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
				     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx *,
				int, int));
static void check_final_value PARAMS ((const struct loop *,
				       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx, rtx, int,
				enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependent_givs PARAMS ((struct iv_class *,
					      struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
					enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
					  rtx *, rtx *, rtx *, int, int *,
					  enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
				    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
					int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
					  struct iv_class *, int,
					  basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
					unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
					rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
					      basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));
typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))
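
/* Note that the INSN_UID bound is tested first: an insn created during
   loop optimization has a uid >= max_uid_for_loop and no luid assigned,
   so it must not be looked up in uid_luid.  */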
/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
						     struct induction *,
						     rtx));

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */

static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */
void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
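
  /* Illustration (hypothetical values): if uid_luid were {0, 0, 5, 0, 7},
     the two loops above would turn it into {5, 5, 5, 5, 7}; a deleted
     insn's slot inherits the luid of the nearest surviving insn.  */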
  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
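
/* For example (illustrative): in a loop entered near its bottom, with the
   insns laid out as "jump to test; L1: body; test: ...", scanning in
   execution order from the test wraps from loop->end back to loop->top
   (L1) and stops upon reaching loop->scan_start again.  */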
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
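
  /* For instance (illustrative only), a loop such as

	 while (test ()) body ();

     may be emitted with its exit test at the bottom:

		goto L2;
	 L1:	body ();
	 L2:	if (test ()) goto L1;

     Here the loop is entered at L2, not at its top.  */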
  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;
  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (any_uncondjump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
	{
	  loop->top = next_label (loop->scan_start);
	  loop->scan_start = JUMP_LABEL (p);
	}
    }
  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }
  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
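
  /* Example (illustrative): in

	 for (...)
	   if (cond)
	     x = a / b;

     the division is scanned with MAYBE_NEVER nonzero.  Since it may trap,
     it must not be moved to execute once before the loop when it might not
     have executed at all inside it; see the may_trap_p test below.  */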
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	  && SET_DEST (set) != pic_offset_table_rtx
#endif
	  && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;
	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }
	  /* For parallels, add any possible uses to the dependencies, as
	     we can't move the insn without resolving them first.  */
	  if (GET_CODE (PATTERN (p)) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		{
		  rtx x = XVECEXP (PATTERN (p), 0, i);
		  if (GET_CODE (x) == USE)
		    dependencies
		      = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					   dependencies);
		}
	    }
	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute
	     the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  else if (/* The register is used in basic blocks other
		      than the one where it is set (meaning that
		      something after this point in the loop might
		      depend on its value before the set).  */
		   ! reg_in_basic_block_p (p, SET_DEST (set))
		   /* And the set is not guaranteed to be executed once
		      the loop starts, or the value before the set is
		      needed before the set occurs...

		      ??? Note we have quadratic behaviour here, mitigated
		      by the fact that the previous test will often fail for
		      large loops.  Rather than re-scanning the entire loop
		      each time for register usage, we should build tables
		      of the register usage and use them here instead.  */
		   && (maybe_never
		       || loop_reg_used_before_p (loop, set, p)))
	    /* It is unsafe to move the set.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit
	       test.  That behavior is incorrect and was removed.  */
	    ;
	  else if ((tem = loop_invariant_p (loop, src))
		   && (dependencies == 0
		       || (tem2
			   = loop_invariant_p (loop, dependencies)) != 0)
		   && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
		       || (tem1
			   = consec_sets_invariant_p
			   (loop, SET_DEST (set),
			    regs->array[REGNO (SET_DEST (set))].set_in_loop,
			    p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      struct movable *m;
	      int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used
		 exactly once if we are in a loop with calls
		 (a "large loop"), see if we can replace the usage of
		 this register with the source of this SET.  If we can,
		 delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (loop_info->has_call
		  && regs->array[regno].single_usage != 0
		  && regs->array[regno].single_usage != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (regs->array[regno].single_usage))
		  && regs->array[regno].set_in_loop == 1
		  && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && (REGNO (SET_SRC (set))
				 < FIRST_PSEUDO_REGISTER))))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   regs->array[regno].single_usage)
		  && no_labels_between_p (p,
					  regs->array[regno].single_usage)
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   regs->array[regno].single_usage))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy
		     the new source, so that we don't get rtx sharing
		     between the SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (regs->array[regno].single_usage)
		    = replace_rtx
		      (REG_NOTES (regs->array[regno].single_usage),
		       SET_DEST (set), copy_rtx (SET_SRC (set)));

		  delete_insn (p);
		  for (i = 0;
		       i < (int) LOOP_REGNO_NREGS (regno, SET_DEST (set));
		       i++)
		    regs->array[regno+i].set_in_loop = 0;
		  continue;
		}
	      m = (struct movable *) xmalloc (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec
		= regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either loop_invariant_p
		 or consec_sets_invariant_p returned 2
		 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = LOOP_REG_GLOBAL_P (loop, regno);
	      m->match = 0;
	      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
	      m->savings = regs->array[regno].n_times_set;
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      for (i = 0;
		   i < (int) LOOP_REGNO_NREGS (regno, SET_DEST (set));
		   i++)
		regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      loop_movables_add (movables, m);
	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and
		     possibly m->set_src to correspond to the effects of
		     all the insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv
		    = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      int regno = REGNO (SET_DEST (set));
	      if (regs->array[regno].set_in_loop == 2)
		{
		  struct movable *m;
		  m = (struct movable *) xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || LOOP_REG_GLOBAL_P (loop, regno)
			       || (labels_in_range_p
				   (p, REGNO_FIRST_LUID (regno))));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = 1;
		  for (i = 0;
		       i < (int) LOOP_REGNO_NREGS (regno, SET_DEST (set));
		       i++)
		    regs->array[regno+i].set_in_loop = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't
	 count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads, when it dies, right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
	 registers.  */
      if (max_reg_num () > regs->num)
	{
	  loop_regs_scan (loop, 0);
	  for (update_start = loop_start;
	       PREV_INSN (update_start)
		 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
	       update_start = PREV_INSN (update_start))
	    ;
	  update_end = NEXT_INSN (loop_end);

	  reg_scan_update (update_start, update_end, loop_max_reg);
	  loop_max_reg = max_reg_num ();
	}
    }
  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not
     actually invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();
  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this,
				output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg that it loads
   leads, when it dies, right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Only pseudo registers are allowed to match or be matched,
     since move_movables does not validate the change.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
	&& m->regno >= FIRST_PSEUDO_REGISTER
	&& !m->partial)
      {
	struct movable *m1;
	int regno = m->regno;

	memset (matched_regs, 0, regs->num);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && regs->array[m1->regno].n_times_set == 1
	      && m1->regno >= FIRST_PSEUDO_REGISTER
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables, regs))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    struct movable *m1;

	    int first = REGNO_FIRST_LUID (m->regno);
	    int last = REGNO_LAST_LUID (m->regno);

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (REGNO_FIRST_LUID (m1->regno) > last
		       || REGNO_LAST_LUID (m1->regno) < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}
/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
1597 /* Return 1 if X and Y are identical-looking rtx's.
1598 This is the Lisp function EQUAL for rtx arguments.
1600 If two registers are matching movables or a movable register and an
1601 equivalent constant, consider them equal. */
1603 static int
1604 rtx_equal_for_loop_p (x, y, movables, regs)
1605 rtx x, y;
1606 struct loop_movables *movables;
1607 struct loop_regs *regs;
1609 int i;
1610 int j;
1611 struct movable *m;
1612 enum rtx_code code;
1613 const char *fmt;
1615 if (x == y)
1616 return 1;
1617 if (x == 0 || y == 0)
1618 return 0;
1620 code = GET_CODE (x);
1622 /* If we have a register and a constant, they may sometimes be
1623 equal. */
1624 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1625 && CONSTANT_P (y))
1627 for (m = movables->head; m; m = m->next)
1628 if (m->move_insn && m->regno == REGNO (x)
1629 && rtx_equal_p (m->set_src, y))
1630 return 1;
1632 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1633 && CONSTANT_P (x))
1635 for (m = movables->head; m; m = m->next)
1636 if (m->move_insn && m->regno == REGNO (y)
1637 && rtx_equal_p (m->set_src, x))
1638 return 1;
1641 /* Otherwise, rtx's of different codes cannot be equal. */
1642 if (code != GET_CODE (y))
1643 return 0;
1645 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1646 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1648 if (GET_MODE (x) != GET_MODE (y))
1649 return 0;
1651 /* These three types of rtx's can be compared nonrecursively. */
1652 if (code == REG)
1653 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1655 if (code == LABEL_REF)
1656 return XEXP (x, 0) == XEXP (y, 0);
1657 if (code == SYMBOL_REF)
1658 return XSTR (x, 0) == XSTR (y, 0);
1660 /* Compare the elements. If any pair of corresponding elements
1661 fails to match, return 0 for the whole thing.
1663 fmt = GET_RTX_FORMAT (code);
1664 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1666 switch (fmt[i])
1668 case 'w':
1669 if (XWINT (x, i) != XWINT (y, i))
1670 return 0;
1671 break;
1673 case 'i':
1674 if (XINT (x, i) != XINT (y, i))
1675 return 0;
1676 break;
1678 case 'E':
1679 /* Two vectors must have the same length. */
1680 if (XVECLEN (x, i) != XVECLEN (y, i))
1681 return 0;
1683 /* And the corresponding elements must match. */
1684 for (j = 0; j < XVECLEN (x, i); j++)
1685 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1686 movables, regs) == 0)
1687 return 0;
1688 break;
1690 case 'e':
1691 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1692 == 0)
1693 return 0;
1694 break;
1696 case 's':
1697 if (strcmp (XSTR (x, i), XSTR (y, i)))
1698 return 0;
1699 break;
1701 case 'u':
1702 /* These are just backpointers, so they don't matter. */
1703 break;
1705 case '0':
1706 break;
1708 /* It is believed that rtx's at this level will never
1709 contain anything but integers and other rtx's,
1710 except for within LABEL_REFs and SYMBOL_REFs. */
1711 default:
1712 abort ();
1715 return 1;
1718 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1719 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1720 references is incremented once for each added note. */
1722 static void
1723 add_label_notes (x, insns)
1724 rtx x;
1725 rtx insns;
1727 enum rtx_code code = GET_CODE (x);
1728 int i, j;
1729 const char *fmt;
1730 rtx insn;
1732 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1734 /* This code used to ignore labels that referred to dispatch tables to
1735 avoid flow generating (slightly) worse code.
1737 We no longer ignore such label references (see LABEL_REF handling in
1738 mark_jump_label for additional information). */
1739 for (insn = insns; insn; insn = NEXT_INSN (insn))
1740 if (reg_mentioned_p (XEXP (x, 0), insn))
1742 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1743 REG_NOTES (insn));
1744 if (LABEL_P (XEXP (x, 0)))
1745 LABEL_NUSES (XEXP (x, 0))++;
1749 fmt = GET_RTX_FORMAT (code);
1750 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1752 if (fmt[i] == 'e')
1753 add_label_notes (XEXP (x, i), insns);
1754 else if (fmt[i] == 'E')
1755 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1756 add_label_notes (XVECEXP (x, i, j), insns);
1760 /* Scan MOVABLES, and move the insns that deserve to be moved.
1761 If two matching movables are combined, replace one reg with the
1762 other throughout. */
1764 static void
1765 move_movables (loop, movables, threshold, insn_count)
1766 struct loop *loop;
1767 struct loop_movables *movables;
1768 int threshold;
1769 int insn_count;
1771 struct loop_regs *regs = LOOP_REGS (loop);
1772 int nregs = regs->num;
1773 rtx new_start = 0;
1774 struct movable *m;
1775 rtx p;
1776 rtx loop_start = loop->start;
1777 rtx loop_end = loop->end;
1778 /* Map of pseudo-register replacements to handle combining
1779 when we move several insns that load the same value
1780 into different pseudo-registers. */
1781 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1782 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1784 for (m = movables->head; m; m = m->next)
1786 /* Describe this movable insn. */
1788 if (loop_dump_stream)
1790 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1791 INSN_UID (m->insn), m->regno, m->lifetime);
1792 if (m->consec > 0)
1793 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1794 if (m->cond)
1795 fprintf (loop_dump_stream, "cond ");
1796 if (m->force)
1797 fprintf (loop_dump_stream, "force ");
1798 if (m->global)
1799 fprintf (loop_dump_stream, "global ");
1800 if (m->done)
1801 fprintf (loop_dump_stream, "done ");
1802 if (m->move_insn)
1803 fprintf (loop_dump_stream, "move-insn ");
1804 if (m->match)
1805 fprintf (loop_dump_stream, "matches %d ",
1806 INSN_UID (m->match->insn));
1807 if (m->forces)
1808 fprintf (loop_dump_stream, "forces %d ",
1809 INSN_UID (m->forces->insn));
1812 /* Ignore the insn if it's already done (it matched something else).
1813 Otherwise, see if it is now safe to move. */
1815 if (!m->done
1816 && (! m->cond
1817 || (1 == loop_invariant_p (loop, m->set_src)
1818 && (m->dependencies == 0
1819 || 1 == loop_invariant_p (loop, m->dependencies))
1820 && (m->consec == 0
1821 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1822 m->consec + 1,
1823 m->insn))))
1824 && (! m->forces || m->forces->done))
1826 int regno;
1827 rtx p;
1828 int savings = m->savings;
1830 /* We have an insn that is safe to move.
1831 Compute its desirability. */
1833 p = m->insn;
1834 regno = m->regno;
1836 if (loop_dump_stream)
1837 fprintf (loop_dump_stream, "savings %d ", savings);
1839 if (regs->array[regno].moved_once && loop_dump_stream)
1840 fprintf (loop_dump_stream, "halved since already moved ");
1842 /* An insn MUST be moved if we already moved something else
1843 which is safe only if this one is moved too: that is,
1844 if already_moved[REGNO] is nonzero. */
1846 /* An insn is desirable to move if the new lifetime of the
1847 register is no more than THRESHOLD times the old lifetime.
1848 If it's not desirable, it means the loop is so big
1849 that moving won't speed things up much,
1850 and it is liable to make register usage worse. */
1852 /* It is also desirable to move if it can be moved at no
1853 extra cost because something else was already moved. */
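/* Worked example of the test below: with THRESHOLD 6, SAVINGS 2 and a
   register lifetime of 30 insns, the product 6 * 2 * 30 = 360 makes
   the move desirable in a loop of up to 360 insns; if the register
   was already moved out of some other loop, its savings count for
   half, so the limit drops to loops of at most 180 insns. */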
1855 if (already_moved[regno]
1856 || flag_move_all_movables
1857 || (threshold * savings * m->lifetime) >=
1858 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1859 || (m->forces && m->forces->done
1860 && regs->array[m->forces->regno].n_times_set == 1))
1862 int count;
1863 struct movable *m1;
1864 rtx first = NULL_RTX;
1866 /* Now move the insns that set the reg. */
1868 if (m->partial && m->match)
1870 rtx newpat, i1;
1871 rtx r1, r2;
1872 /* Find the end of this chain of matching regs.
1873 Thus, we load each reg in the chain from that one reg.
1874 And that reg is loaded with 0 directly,
1875 since it has ->match == 0. */
1876 for (m1 = m; m1->match; m1 = m1->match);
1877 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1878 SET_DEST (PATTERN (m1->insn)));
1879 i1 = loop_insn_hoist (loop, newpat);
1881 /* Mark the moved, invariant reg as being allowed to
1882 share a hard reg with the other matching invariant. */
1883 REG_NOTES (i1) = REG_NOTES (m->insn);
1884 r1 = SET_DEST (PATTERN (m->insn));
1885 r2 = SET_DEST (PATTERN (m1->insn));
1886 regs_may_share
1887 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1888 gen_rtx_EXPR_LIST (VOIDmode, r2,
1889 regs_may_share));
1890 delete_insn (m->insn);
1892 if (new_start == 0)
1893 new_start = i1;
1895 if (loop_dump_stream)
1896 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1898 /* If we are to re-generate the item being moved with a
1899 new move insn, first delete what we have and then emit
1900 the move insn before the loop. */
1901 else if (m->move_insn)
1903 rtx i1, temp, seq;
1905 for (count = m->consec; count >= 0; count--)
1907 /* If this is the first insn of a library call sequence,
1908 skip to the end. */
1909 if (GET_CODE (p) != NOTE
1910 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1911 p = XEXP (temp, 0);
1913 /* If this is the last insn of a libcall sequence, then
1914 delete every insn in the sequence except the last.
1915 The last insn is handled in the normal manner. */
1916 if (GET_CODE (p) != NOTE
1917 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1919 temp = XEXP (temp, 0);
1920 while (temp != p)
1921 temp = delete_insn (temp);
1924 temp = p;
1925 p = delete_insn (p);
1927 /* simplify_giv_expr expects that it can walk the insns
1928 at m->insn forwards and see this old sequence we are
1929 tossing here. delete_insn does preserve the next
1930 pointers, but when we skip over a NOTE we must fix
1931 it up. Otherwise that code walks into the non-deleted
1932 insn stream. */
1933 while (p && GET_CODE (p) == NOTE)
1934 p = NEXT_INSN (temp) = NEXT_INSN (p);
1937 start_sequence ();
1938 emit_move_insn (m->set_dest, m->set_src);
1939 seq = get_insns ();
1940 end_sequence ();
1942 add_label_notes (m->set_src, seq);
1944 i1 = loop_insn_hoist (loop, seq);
1945 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1946 set_unique_reg_note (i1,
1947 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1948 m->set_src);
1950 if (loop_dump_stream)
1951 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1953 /* The more regs we move, the less we like moving them. */
1954 threshold -= 3;
1956 else
1958 for (count = m->consec; count >= 0; count--)
1960 rtx i1, temp;
1962 /* If first insn of libcall sequence, skip to end. */
1963 /* Do this at start of loop, since p is guaranteed to
1964 be an insn here. */
1965 if (GET_CODE (p) != NOTE
1966 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1967 p = XEXP (temp, 0);
1969 /* If last insn of libcall sequence, move all
1970 insns except the last before the loop. The last
1971 insn is handled in the normal manner. */
1972 if (GET_CODE (p) != NOTE
1973 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1975 rtx fn_address = 0;
1976 rtx fn_reg = 0;
1977 rtx fn_address_insn = 0;
1979 first = 0;
1980 for (temp = XEXP (temp, 0); temp != p;
1981 temp = NEXT_INSN (temp))
1983 rtx body;
1984 rtx n;
1985 rtx next;
1987 if (GET_CODE (temp) == NOTE)
1988 continue;
1990 body = PATTERN (temp);
1992 /* Find the next insn after TEMP,
1993 not counting USE or NOTE insns. */
1994 for (next = NEXT_INSN (temp); next != p;
1995 next = NEXT_INSN (next))
1996 if (! (GET_CODE (next) == INSN
1997 && GET_CODE (PATTERN (next)) == USE)
1998 && GET_CODE (next) != NOTE)
1999 break;
2001 /* If that is the call, this may be the insn
2002 that loads the function address.
2004 Extract the function address from the insn
2005 that loads it into a register.
2006 If this insn was cse'd, we get incorrect code.
2008 So emit a new move insn that copies the
2009 function address into the register that the
2010 call insn will use. flow.c will delete any
2011 redundant stores that we have created. */
2012 if (GET_CODE (next) == CALL_INSN
2013 && GET_CODE (body) == SET
2014 && GET_CODE (SET_DEST (body)) == REG
2015 && (n = find_reg_note (temp, REG_EQUAL,
2016 NULL_RTX)))
2018 fn_reg = SET_SRC (body);
2019 if (GET_CODE (fn_reg) != REG)
2020 fn_reg = SET_DEST (body);
2021 fn_address = XEXP (n, 0);
2022 fn_address_insn = temp;
2024 /* We have the call insn.
2025 If it uses the register we suspect it might,
2026 load it with the correct address directly. */
2027 if (GET_CODE (temp) == CALL_INSN
2028 && fn_address != 0
2029 && reg_referenced_p (fn_reg, body))
2030 loop_insn_emit_after (loop, 0, fn_address_insn,
2031 gen_move_insn
2032 (fn_reg, fn_address));
2034 if (GET_CODE (temp) == CALL_INSN)
2036 i1 = loop_call_insn_hoist (loop, body);
2037 /* Because the USAGE information potentially
2038 contains objects other than hard registers
2039 we need to copy it. */
2040 if (CALL_INSN_FUNCTION_USAGE (temp))
2041 CALL_INSN_FUNCTION_USAGE (i1)
2042 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2044 else
2045 i1 = loop_insn_hoist (loop, body);
2046 if (first == 0)
2047 first = i1;
2048 if (temp == fn_address_insn)
2049 fn_address_insn = i1;
2050 REG_NOTES (i1) = REG_NOTES (temp);
2051 REG_NOTES (temp) = NULL;
2052 delete_insn (temp);
2054 if (new_start == 0)
2055 new_start = first;
2057 if (m->savemode != VOIDmode)
2059 /* P sets REG to zero; but we should clear only
2060 the bits that are not covered by the mode
2061 m->savemode. */
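/* E.g. if m->savemode is QImode, the sequence built below computes
   reg &= 0xff, clearing every bit above the low 8 while preserving
   the QImode value already in the register. */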
2062 rtx reg = m->set_dest;
2063 rtx sequence;
2064 rtx tem;
2066 start_sequence ();
2067 tem = expand_simple_binop
2068 (GET_MODE (reg), AND, reg,
2069 GEN_INT ((((HOST_WIDE_INT) 1
2070 << GET_MODE_BITSIZE (m->savemode)))
2071 - 1),
2072 reg, 1, OPTAB_LIB_WIDEN);
2073 if (tem == 0)
2074 abort ();
2075 if (tem != reg)
2076 emit_move_insn (reg, tem);
2077 sequence = get_insns ();
2078 end_sequence ();
2079 i1 = loop_insn_hoist (loop, sequence);
2081 else if (GET_CODE (p) == CALL_INSN)
2083 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2084 /* Because the USAGE information potentially
2085 contains objects other than hard registers
2086 we need to copy it. */
2087 if (CALL_INSN_FUNCTION_USAGE (p))
2088 CALL_INSN_FUNCTION_USAGE (i1)
2089 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2091 else if (count == m->consec && m->move_insn_first)
2093 rtx seq;
2094 /* The SET_SRC might not be invariant, so we must
2095 use the REG_EQUAL note. */
2096 start_sequence ();
2097 emit_move_insn (m->set_dest, m->set_src);
2098 seq = get_insns ();
2099 end_sequence ();
2101 add_label_notes (m->set_src, seq);
2103 i1 = loop_insn_hoist (loop, seq);
2104 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2105 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2106 : REG_EQUAL, m->set_src);
2108 else
2109 i1 = loop_insn_hoist (loop, PATTERN (p));
2111 if (REG_NOTES (i1) == 0)
2113 REG_NOTES (i1) = REG_NOTES (p);
2114 REG_NOTES (p) = NULL;
2116 /* If there is a REG_EQUAL note present whose value
2117 is not loop invariant, then delete it, since it
2118 may cause problems with later optimization passes.
2119 It is possible for cse to create such notes
2120 as a result of record_jump_cond. */
2122 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2123 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2124 remove_note (i1, temp);
2127 if (new_start == 0)
2128 new_start = i1;
2130 if (loop_dump_stream)
2131 fprintf (loop_dump_stream, " moved to %d",
2132 INSN_UID (i1));
2134 /* If library call, now fix the REG_NOTES that contain
2135 insn pointers, namely REG_LIBCALL on FIRST
2136 and REG_RETVAL on I1. */
2137 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2139 XEXP (temp, 0) = first;
2140 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2141 XEXP (temp, 0) = i1;
2144 temp = p;
2145 delete_insn (p);
2146 p = NEXT_INSN (p);
2148 /* simplify_giv_expr expects that it can walk the insns
2149 at m->insn forwards and see this old sequence we are
2150 tossing here. delete_insn does preserve the next
2151 pointers, but when we skip over a NOTE we must fix
2152 it up. Otherwise that code walks into the non-deleted
2153 insn stream. */
2154 while (p && GET_CODE (p) == NOTE)
2155 p = NEXT_INSN (temp) = NEXT_INSN (p);
2158 /* The more regs we move, the less we like moving them. */
2159 threshold -= 3;
2162 /* Any other movable that loads the same register
2163 MUST be moved. */
2164 already_moved[regno] = 1;
2166 /* This reg has been moved out of one loop. */
2167 regs->array[regno].moved_once = 1;
2169 /* The reg set here is now invariant. */
2170 if (! m->partial)
2172 int i;
2173 for (i = 0; i < (int) LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2174 regs->array[regno+i].set_in_loop = 0;
2177 m->done = 1;
2179 /* Change the length-of-life info for the register
2180 to say it lives at least the full length of this loop.
2181 This will help guide optimizations in outer loops. */
2183 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2184 /* This is the old insn before all the moved insns.
2185 We can't use the moved insn because it is out of range
2186 in uid_luid. Only the old insns have luids. */
2187 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2188 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2189 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2191 /* Combine with this moved insn any other matching movables. */
2193 if (! m->partial)
2194 for (m1 = movables->head; m1; m1 = m1->next)
2195 if (m1->match == m)
2197 rtx temp;
2199 /* Schedule the reg loaded by M1
2200 for replacement so that it shares the reg of M.
2201 If the modes differ (only possible in restricted
2202 circumstances), make a SUBREG.
2204 Note this assumes that the target dependent files
2205 treat REG and SUBREG equally, including within
2206 GO_IF_LEGITIMATE_ADDRESS and in all the
2207 predicates since we never verify that replacing the
2208 original register with a SUBREG results in a
2209 recognizable insn. */
2210 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2211 reg_map[m1->regno] = m->set_dest;
2212 else
2213 reg_map[m1->regno]
2214 = gen_lowpart_common (GET_MODE (m1->set_dest),
2215 m->set_dest);
2217 /* Get rid of the matching insn
2218 and prevent further processing of it. */
2219 m1->done = 1;
2221 /* If library call, delete all insns. */
2222 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2223 NULL_RTX)))
2224 delete_insn_chain (XEXP (temp, 0), m1->insn);
2225 else
2226 delete_insn (m1->insn);
2228 /* Any other movable that loads the same register
2229 MUST be moved. */
2230 already_moved[m1->regno] = 1;
2232 /* The reg merged here is now invariant,
2233 if the reg it matches is invariant. */
2234 if (! m->partial)
2236 int i;
2237 for (i = 0;
2238 i < (int) LOOP_REGNO_NREGS (regno, m1->set_dest);
2239 i++)
2240 regs->array[m1->regno+i].set_in_loop = 0;
2244 else if (loop_dump_stream)
2245 fprintf (loop_dump_stream, "not desirable");
2247 else if (loop_dump_stream && !m->match)
2248 fprintf (loop_dump_stream, "not safe");
2250 if (loop_dump_stream)
2251 fprintf (loop_dump_stream, "\n");
2254 if (new_start == 0)
2255 new_start = loop_start;
2257 /* Go through all the instructions in the loop, making
2258 all the register substitutions scheduled in REG_MAP. */
2259 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2260 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2261 || GET_CODE (p) == CALL_INSN)
2263 replace_regs (PATTERN (p), reg_map, nregs, 0);
2264 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2265 INSN_CODE (p) = -1;
2268 /* Clean up. */
2269 free (reg_map);
2270 free (already_moved);
2274 static void
2275 loop_movables_add (movables, m)
2276 struct loop_movables *movables;
2277 struct movable *m;
2279 if (movables->head == 0)
2280 movables->head = m;
2281 else
2282 movables->last->next = m;
2283 movables->last = m;
2287 static void
2288 loop_movables_free (movables)
2289 struct loop_movables *movables;
2291 struct movable *m;
2292 struct movable *m_next;
2294 for (m = movables->head; m; m = m_next)
2296 m_next = m->next;
2297 free (m);
2301 #if 0
2302 /* Scan X and replace the address of any MEM in it with ADDR.
2303 REG is the address that MEM should have before the replacement. */
2305 static void
2306 replace_call_address (x, reg, addr)
2307 rtx x, reg, addr;
2309 enum rtx_code code;
2310 int i;
2311 const char *fmt;
2313 if (x == 0)
2314 return;
2315 code = GET_CODE (x);
2316 switch (code)
2318 case PC:
2319 case CC0:
2320 case CONST_INT:
2321 case CONST_DOUBLE:
2322 case CONST:
2323 case SYMBOL_REF:
2324 case LABEL_REF:
2325 case REG:
2326 return;
2328 case SET:
2329 /* Short cut for very common case. */
2330 replace_call_address (XEXP (x, 1), reg, addr);
2331 return;
2333 case CALL:
2334 /* Short cut for very common case. */
2335 replace_call_address (XEXP (x, 0), reg, addr);
2336 return;
2338 case MEM:
2339 /* If this MEM uses a reg other than the one we expected,
2340 something is wrong. */
2341 if (XEXP (x, 0) != reg)
2342 abort ();
2343 XEXP (x, 0) = addr;
2344 return;
2346 default:
2347 break;
2350 fmt = GET_RTX_FORMAT (code);
2351 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2353 if (fmt[i] == 'e')
2354 replace_call_address (XEXP (x, i), reg, addr);
2355 else if (fmt[i] == 'E')
2357 int j;
2358 for (j = 0; j < XVECLEN (x, i); j++)
2359 replace_call_address (XVECEXP (x, i, j), reg, addr);
2363 #endif
2365 /* Return the number of memory refs to addresses that vary
2366 in the rtx X. */
2368 static int
2369 count_nonfixed_reads (loop, x)
2370 const struct loop *loop;
2371 rtx x;
2373 enum rtx_code code;
2374 int i;
2375 const char *fmt;
2376 int value;
2378 if (x == 0)
2379 return 0;
2381 code = GET_CODE (x);
2382 switch (code)
2384 case PC:
2385 case CC0:
2386 case CONST_INT:
2387 case CONST_DOUBLE:
2388 case CONST:
2389 case SYMBOL_REF:
2390 case LABEL_REF:
2391 case REG:
2392 return 0;
2394 case MEM:
2395 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2396 + count_nonfixed_reads (loop, XEXP (x, 0)));
2398 default:
2399 break;
2402 value = 0;
2403 fmt = GET_RTX_FORMAT (code);
2404 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2406 if (fmt[i] == 'e')
2407 value += count_nonfixed_reads (loop, XEXP (x, i));
2408 if (fmt[i] == 'E')
2410 int j;
2411 for (j = 0; j < XVECLEN (x, i); j++)
2412 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2415 return value;
2418 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2419 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2420 `unknown_address_altered', `unknown_constant_address_altered', and
2421 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2422 list `store_mems' in LOOP. */
2424 static void
2425 prescan_loop (loop)
2426 struct loop *loop;
2428 int level = 1;
2429 rtx insn;
2430 struct loop_info *loop_info = LOOP_INFO (loop);
2431 rtx start = loop->start;
2432 rtx end = loop->end;
2433 /* The label after END. Jumping here is just like falling off the
2434 end of the loop. We use next_nonnote_insn instead of next_label
2435 as a hedge against the (pathological) case where some actual insn
2436 might end up between the two. */
2437 rtx exit_target = next_nonnote_insn (end);
2439 loop_info->has_indirect_jump = indirect_jump_in_function;
2440 loop_info->pre_header_has_call = 0;
2441 loop_info->has_call = 0;
2442 loop_info->has_nonconst_call = 0;
2443 loop_info->has_prefetch = 0;
2444 loop_info->has_volatile = 0;
2445 loop_info->has_tablejump = 0;
2446 loop_info->has_multiple_exit_targets = 0;
2447 loop->level = 1;
2449 loop_info->unknown_address_altered = 0;
2450 loop_info->unknown_constant_address_altered = 0;
2451 loop_info->store_mems = NULL_RTX;
2452 loop_info->first_loop_store_insn = NULL_RTX;
2453 loop_info->mems_idx = 0;
2454 loop_info->num_mem_sets = 0;
2457 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2458 insn = PREV_INSN (insn))
2460 if (GET_CODE (insn) == CALL_INSN)
2462 loop_info->pre_header_has_call = 1;
2463 break;
2467 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2468 insn = NEXT_INSN (insn))
2470 switch (GET_CODE (insn))
2472 case NOTE:
2473 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2475 ++level;
2476 /* Count number of loops contained in this one. */
2477 loop->level++;
2479 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2480 --level;
2481 break;
2483 case CALL_INSN:
2484 if (! CONST_OR_PURE_CALL_P (insn))
2486 loop_info->unknown_address_altered = 1;
2487 loop_info->has_nonconst_call = 1;
2489 else if (pure_call_p (insn))
2490 loop_info->has_nonconst_call = 1;
2491 loop_info->has_call = 1;
2492 if (can_throw_internal (insn))
2493 loop_info->has_multiple_exit_targets = 1;
2494 break;
2496 case JUMP_INSN:
2497 if (! loop_info->has_multiple_exit_targets)
2499 rtx set = pc_set (insn);
2501 if (set)
2503 rtx src = SET_SRC (set);
2504 rtx label1, label2;
2506 if (GET_CODE (src) == IF_THEN_ELSE)
2508 label1 = XEXP (src, 1);
2509 label2 = XEXP (src, 2);
2511 else
2513 label1 = src;
2514 label2 = NULL_RTX;
2519 if (label1 && label1 != pc_rtx)
2521 if (GET_CODE (label1) != LABEL_REF)
2523 /* Something tricky. */
2524 loop_info->has_multiple_exit_targets = 1;
2525 break;
2527 else if (XEXP (label1, 0) != exit_target
2528 && LABEL_OUTSIDE_LOOP_P (label1))
2530 /* A jump outside the current loop. */
2531 loop_info->has_multiple_exit_targets = 1;
2532 break;
2536 label1 = label2;
2537 label2 = NULL_RTX;
2539 while (label1);
2541 else
2543 /* A return, or something tricky. */
2544 loop_info->has_multiple_exit_targets = 1;
2547 /* FALLTHRU */
2549 case INSN:
2550 if (volatile_refs_p (PATTERN (insn)))
2551 loop_info->has_volatile = 1;
2553 if (GET_CODE (insn) == JUMP_INSN
2554 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2555 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2556 loop_info->has_tablejump = 1;
2558 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2559 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2560 loop_info->first_loop_store_insn = insn;
2562 if (flag_non_call_exceptions && can_throw_internal (insn))
2563 loop_info->has_multiple_exit_targets = 1;
2564 break;
2566 default:
2567 break;
2571 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2572 if (/* An exception thrown by a called function might land us
2573 anywhere. */
2574 ! loop_info->has_nonconst_call
2575 /* We don't want loads for MEMs moved to a location before the
2576 one at which their stack memory becomes allocated. (Note
2577 that this is not a problem for malloc, etc., since those
2578 require actual function calls.) */
2579 && ! current_function_calls_alloca
2580 /* There are ways to leave the loop other than falling off the
2581 end. */
2582 && ! loop_info->has_multiple_exit_targets)
2583 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2584 insn = NEXT_INSN (insn))
2585 for_each_rtx (&insn, insert_loop_mem, loop_info);
2587 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2588 that loop_invariant_p and load_mems can use true_dependence
2589 to determine what is really clobbered. */
2590 if (loop_info->unknown_address_altered)
2592 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2594 loop_info->store_mems
2595 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2597 if (loop_info->unknown_constant_address_altered)
2599 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2601 RTX_UNCHANGING_P (mem) = 1;
2602 loop_info->store_mems
2603 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2607 /* Invalidate all loops containing LABEL. */
2609 static void
2610 invalidate_loops_containing_label (label)
2611 rtx label;
2613 struct loop *loop;
2614 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2615 loop->invalid = 1;
2618 /* Scan the function looking for loops. Record the start and end of each loop.
2619 Also mark as invalid loops any loops that contain a setjmp or are branched
2620 to from outside the loop. */
2622 static void
2623 find_and_verify_loops (f, loops)
2624 rtx f;
2625 struct loops *loops;
2627 rtx insn;
2628 rtx label;
2629 int num_loops;
2630 struct loop *current_loop;
2631 struct loop *next_loop;
2632 struct loop *loop;
2634 num_loops = loops->num;
2636 compute_luids (f, NULL_RTX, 0);
2638 /* If there are jumps to undefined labels,
2639 treat them as jumps out of any/all loops.
2640 This also avoids writing past end of tables when there are no loops. */
2641 uid_loop[0] = NULL;
2643 /* Find boundaries of loops, mark which loops are contained within
2644 loops, and invalidate loops that have setjmp. */
2646 num_loops = 0;
2647 current_loop = NULL;
2648 for (insn = f; insn; insn = NEXT_INSN (insn))
2650 if (GET_CODE (insn) == NOTE)
2651 switch (NOTE_LINE_NUMBER (insn))
2653 case NOTE_INSN_LOOP_BEG:
2654 next_loop = loops->array + num_loops;
2655 next_loop->num = num_loops;
2656 num_loops++;
2657 next_loop->start = insn;
2658 next_loop->outer = current_loop;
2659 current_loop = next_loop;
2660 break;
2662 case NOTE_INSN_LOOP_CONT:
2663 current_loop->cont = insn;
2664 break;
2666 case NOTE_INSN_LOOP_VTOP:
2667 current_loop->vtop = insn;
2668 break;
2670 case NOTE_INSN_LOOP_END:
2671 if (! current_loop)
2672 abort ();
2674 current_loop->end = insn;
2675 current_loop = current_loop->outer;
2676 break;
2678 default:
2679 break;
2682 if (GET_CODE (insn) == CALL_INSN
2683 && find_reg_note (insn, REG_SETJMP, NULL))
2685 /* In this case, we must invalidate our current loop and any
2686 enclosing loop. */
2687 for (loop = current_loop; loop; loop = loop->outer)
2689 loop->invalid = 1;
2690 if (loop_dump_stream)
2691 fprintf (loop_dump_stream,
2692 "\nLoop at %d ignored due to setjmp.\n",
2693 INSN_UID (loop->start));
2697 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2698 enclosing loop, but this doesn't matter. */
2699 uid_loop[INSN_UID (insn)] = current_loop;
2702 /* Any loop containing a label used in an initializer must be invalidated,
2703 because it can be jumped into from anywhere. */
2704 for (label = forced_labels; label; label = XEXP (label, 1))
2705 invalidate_loops_containing_label (XEXP (label, 0));
2707 /* Any loop containing a label used for an exception handler must be
2708 invalidated, because it can be jumped into from anywhere. */
2709 for_each_eh_label (invalidate_loops_containing_label);
2711 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2712 loop that it is not contained within, that loop is marked invalid.
2713 If any INSN or CALL_INSN uses a label's address, then the loop containing
2714 that label is marked invalid, because it could be jumped into from
2715 anywhere.
2717 Also look for blocks of code ending in an unconditional branch that
2718 exits the loop. If such a block is surrounded by a conditional
2719 branch around the block, move the block elsewhere (see below) and
2720 invert the jump to point to the code block. This may eliminate a
2721 label in our loop and will simplify processing by both us and a
2722 possible second cse pass. */
2724 for (insn = f; insn; insn = NEXT_INSN (insn))
2725 if (INSN_P (insn))
2727 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2729 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2731 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2732 if (note)
2733 invalidate_loops_containing_label (XEXP (note, 0));
2736 if (GET_CODE (insn) != JUMP_INSN)
2737 continue;
2739 mark_loop_jump (PATTERN (insn), this_loop);
2741 /* See if this is an unconditional branch outside the loop. */
2742 if (this_loop
2743 && (GET_CODE (PATTERN (insn)) == RETURN
2744 || (any_uncondjump_p (insn)
2745 && onlyjump_p (insn)
2746 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2747 != this_loop)))
2748 && get_max_uid () < max_uid_for_loop)
2750 rtx p;
2751 rtx our_next = next_real_insn (insn);
2752 rtx last_insn_to_move = NEXT_INSN (insn);
2753 struct loop *dest_loop;
2754 struct loop *outer_loop = NULL;
2756 /* Go backwards until we reach the start of the loop, a label,
2757 or a JUMP_INSN. */
2758 for (p = PREV_INSN (insn);
2759 GET_CODE (p) != CODE_LABEL
2760 && ! (GET_CODE (p) == NOTE
2761 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2762 && GET_CODE (p) != JUMP_INSN;
2763 p = PREV_INSN (p))
2766 /* Check for the case where we have a jump to an inner nested
2767 loop, and do not perform the optimization in that case. */
2769 if (JUMP_LABEL (insn))
2771 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2772 if (dest_loop)
2774 for (outer_loop = dest_loop; outer_loop;
2775 outer_loop = outer_loop->outer)
2776 if (outer_loop == this_loop)
2777 break;
2781 /* Make sure that the target of P is within the current loop. */
2783 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2784 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2785 outer_loop = this_loop;
2787 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2788 we have a block of code to try to move.
2790 We look backward and then forward from the target of INSN
2791 to find a BARRIER at the same loop depth as the target.
2792 If we find such a BARRIER, we make a new label for the start
2793 of the block, invert the jump in P and point it to that label,
2794 and move the block of code to the spot we found. */
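/* Roughly, the transformation looks like this:

      before:                      after:
         if (cond) goto L1            if (! cond) goto L2
         goto exit                 L1: ...rest of the loop...
      L1: ...                         ...
                                   (past a BARRIER outside the loop)
                                   L2: goto exit                      */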
2796 if (! outer_loop
2797 && GET_CODE (p) == JUMP_INSN
2798 && JUMP_LABEL (p) != 0
2799 /* Just ignore jumps to labels that were never emitted.
2800 These always indicate compilation errors. */
2801 && INSN_UID (JUMP_LABEL (p)) != 0
2802 && any_condjump_p (p) && onlyjump_p (p)
2803 && next_real_insn (JUMP_LABEL (p)) == our_next
2804 /* If it's not safe to move the sequence, then we
2805 mustn't try. */
2806 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2807 &last_insn_to_move))
2809 rtx target
2810 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2811 struct loop *target_loop = uid_loop[INSN_UID (target)];
2812 rtx loc, loc2;
2813 rtx tmp;
2815 /* Search for possible garbage past the conditional jumps
2816 and look for the last barrier. */
2817 for (tmp = last_insn_to_move;
2818 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2819 if (GET_CODE (tmp) == BARRIER)
2820 last_insn_to_move = tmp;
2822 for (loc = target; loc; loc = PREV_INSN (loc))
2823 if (GET_CODE (loc) == BARRIER
2824 /* Don't move things inside a tablejump. */
2825 && ((loc2 = next_nonnote_insn (loc)) == 0
2826 || GET_CODE (loc2) != CODE_LABEL
2827 || (loc2 = next_nonnote_insn (loc2)) == 0
2828 || GET_CODE (loc2) != JUMP_INSN
2829 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2830 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2831 && uid_loop[INSN_UID (loc)] == target_loop)
2832 break;
2834 if (loc == 0)
2835 for (loc = target; loc; loc = NEXT_INSN (loc))
2836 if (GET_CODE (loc) == BARRIER
2837 /* Don't move things inside a tablejump. */
2838 && ((loc2 = next_nonnote_insn (loc)) == 0
2839 || GET_CODE (loc2) != CODE_LABEL
2840 || (loc2 = next_nonnote_insn (loc2)) == 0
2841 || GET_CODE (loc2) != JUMP_INSN
2842 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2843 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2844 && uid_loop[INSN_UID (loc)] == target_loop)
2845 break;
2847 if (loc)
2849 rtx cond_label = JUMP_LABEL (p);
2850 rtx new_label = get_label_after (p);
2852 /* Ensure our label doesn't go away. */
2853 LABEL_NUSES (cond_label)++;
2855 /* Verify that uid_loop is large enough and that
2856 we can invert P. */
2857 if (invert_jump (p, new_label, 1))
2859 rtx q, r;
2861 /* If no suitable BARRIER was found, create a suitable
2862 one before TARGET. Since TARGET is a fall through
2863 path, we'll need to insert a jump around our block
2864 and add a BARRIER before TARGET.
2866 This creates an extra unconditional jump outside
2867 the loop. However, the benefits of removing rarely
2868 executed instructions from inside the loop usually
2869 outweigh the cost of the extra unconditional jump
2870 outside the loop. */
2871 if (loc == 0)
2873 rtx temp;
2875 temp = gen_jump (JUMP_LABEL (insn));
2876 temp = emit_jump_insn_before (temp, target);
2877 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2878 LABEL_NUSES (JUMP_LABEL (insn))++;
2879 loc = emit_barrier_before (target);
2882 /* Include the BARRIER after INSN and copy the
2883 block after LOC. */
2884 if (squeeze_notes (&new_label, &last_insn_to_move))
2885 abort ();
2886 reorder_insns (new_label, last_insn_to_move, loc);
2888 /* All those insns are now in TARGET_LOOP. */
2889 for (q = new_label;
2890 q != NEXT_INSN (last_insn_to_move);
2891 q = NEXT_INSN (q))
2892 uid_loop[INSN_UID (q)] = target_loop;
2894 /* The label jumped to by INSN is no longer a loop
2895 exit. Unless INSN does not have a label (e.g.,
2896 it is a RETURN insn), search loop->exit_labels
2897 to find its label_ref, and remove it. Also turn
2898 off LABEL_OUTSIDE_LOOP_P bit. */
2899 if (JUMP_LABEL (insn))
2901 for (q = 0, r = this_loop->exit_labels;
2902 r;
2903 q = r, r = LABEL_NEXTREF (r))
2904 if (XEXP (r, 0) == JUMP_LABEL (insn))
2906 LABEL_OUTSIDE_LOOP_P (r) = 0;
2907 if (q)
2908 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2909 else
2910 this_loop->exit_labels = LABEL_NEXTREF (r);
2911 break;
2914 for (loop = this_loop; loop && loop != target_loop;
2915 loop = loop->outer)
2916 loop->exit_count--;
2918 /* If we didn't find it, then something is
2919 wrong. */
2920 if (! r)
2921 abort ();
2924 /* P is now a jump outside the loop, so it must be put
2925 in loop->exit_labels, and marked as such.
2926 The easiest way to do this is to just call
2927 mark_loop_jump again for P. */
2928 mark_loop_jump (PATTERN (p), this_loop);
2930 /* If INSN now jumps to the insn after it,
2931 delete INSN. */
2932 if (JUMP_LABEL (insn) != 0
2933 && (next_real_insn (JUMP_LABEL (insn))
2934 == next_real_insn (insn)))
2935 delete_related_insns (insn);
2938 /* Continue the loop after where the conditional
2939 branch used to jump, since the only branch insn
2940 in the block (if it still remains) is an inter-loop
2941 branch and hence needs no processing. */
2942 insn = NEXT_INSN (cond_label);
2944 if (--LABEL_NUSES (cond_label) == 0)
2945 delete_related_insns (cond_label);
2947 /* This loop will be continued with NEXT_INSN (insn). */
2948 insn = PREV_INSN (insn);
2955 /* If any label in X jumps to a loop different from LOOP and any of the
2956 loops it is contained in, mark the target loop invalid.
2958 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2960 static void
2961 mark_loop_jump (x, loop)
2962 rtx x;
2963 struct loop *loop;
2965 struct loop *dest_loop;
2966 struct loop *outer_loop;
2967 int i;
2969 switch (GET_CODE (x))
2971 case PC:
2972 case USE:
2973 case CLOBBER:
2974 case REG:
2975 case MEM:
2976 case CONST_INT:
2977 case CONST_DOUBLE:
2978 case RETURN:
2979 return;
2981 case CONST:
2982 /* There could be a label reference in here. */
2983 mark_loop_jump (XEXP (x, 0), loop);
2984 return;
2986 case PLUS:
2987 case MINUS:
2988 case MULT:
2989 mark_loop_jump (XEXP (x, 0), loop);
2990 mark_loop_jump (XEXP (x, 1), loop);
2991 return;
2993 case LO_SUM:
2994 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2995 mark_loop_jump (XEXP (x, 1), loop);
2996 return;
2998 case SIGN_EXTEND:
2999 case ZERO_EXTEND:
3000 mark_loop_jump (XEXP (x, 0), loop);
3001 return;
3003 case LABEL_REF:
3004 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3006 /* Link together all labels that branch outside the loop. This
3007 is used by final_[bg]iv_value and the loop unrolling code. Also
3008 mark this LABEL_REF so we know that this branch should predict
3009 false. */
3011 /* A check to make sure the label is not in an inner nested loop,
3012 since this does not count as a loop exit. */
3013 if (dest_loop)
3015 for (outer_loop = dest_loop; outer_loop;
3016 outer_loop = outer_loop->outer)
3017 if (outer_loop == loop)
3018 break;
3020 else
3021 outer_loop = NULL;
3023 if (loop && ! outer_loop)
3025 LABEL_OUTSIDE_LOOP_P (x) = 1;
3026 LABEL_NEXTREF (x) = loop->exit_labels;
3027 loop->exit_labels = x;
3029 for (outer_loop = loop;
3030 outer_loop && outer_loop != dest_loop;
3031 outer_loop = outer_loop->outer)
3032 outer_loop->exit_count++;
3035 /* If this is inside a loop, but not in the current loop or one enclosed
3036 by it, it invalidates at least one loop. */
3038 if (! dest_loop)
3039 return;
3041 /* We must invalidate every nested loop containing the target of this
3042 label, except those that also contain the jump insn. */
3044 for (; dest_loop; dest_loop = dest_loop->outer)
3046 /* Stop when we reach a loop that also contains the jump insn. */
3047 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3048 if (dest_loop == outer_loop)
3049 return;
3051 /* If we get here, we know we need to invalidate a loop. */
3052 if (loop_dump_stream && ! dest_loop->invalid)
3053 fprintf (loop_dump_stream,
3054 "\nLoop at %d ignored due to multiple entry points.\n",
3055 INSN_UID (dest_loop->start));
3057 dest_loop->invalid = 1;
3059 return;
3061 case SET:
3062 /* If this is not setting pc, ignore. */
3063 if (SET_DEST (x) == pc_rtx)
3064 mark_loop_jump (SET_SRC (x), loop);
3065 return;
3067 case IF_THEN_ELSE:
3068 mark_loop_jump (XEXP (x, 1), loop);
3069 mark_loop_jump (XEXP (x, 2), loop);
3070 return;
3072 case PARALLEL:
3073 case ADDR_VEC:
3074 for (i = 0; i < XVECLEN (x, 0); i++)
3075 mark_loop_jump (XVECEXP (x, 0, i), loop);
3076 return;
3078 case ADDR_DIFF_VEC:
3079 for (i = 0; i < XVECLEN (x, 1); i++)
3080 mark_loop_jump (XVECEXP (x, 1, i), loop);
3081 return;
3083 default:
3084 /* Strictly speaking this is not a jump into the loop, only a possible
3085 jump out of the loop. However, we have no way to link the destination
3086 of this jump onto the list of exit labels. To be safe we mark this
3087 loop and any containing loops as invalid. */
3088 if (loop)
3090 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3092 if (loop_dump_stream && ! outer_loop->invalid)
3093 fprintf (loop_dump_stream,
3094 "\nLoop at %d ignored due to unknown exit jump.\n",
3095 INSN_UID (outer_loop->start));
3096 outer_loop->invalid = 1;
3099 return;
3103 /* Return nonzero if there is a label in the range from
3104 insn INSN to and including the insn whose luid is END.
3105 INSN must have an assigned luid (i.e., it must not have
3106 been previously created by loop.c). */
3108 static int
3109 labels_in_range_p (insn, end)
3110 rtx insn;
3111 int end;
3113 while (insn && INSN_LUID (insn) <= end)
3115 if (GET_CODE (insn) == CODE_LABEL)
3116 return 1;
3117 insn = NEXT_INSN (insn);
3120 return 0;
3123 /* Record that a memory reference X is being set. */
3125 static void
3126 note_addr_stored (x, y, data)
3127 rtx x;
3128 rtx y ATTRIBUTE_UNUSED;
3129 void *data ATTRIBUTE_UNUSED;
3131 struct loop_info *loop_info = data;
3133 if (x == 0 || GET_CODE (x) != MEM)
3134 return;
3136 /* Count number of memory writes.
3137 This affects heuristics in strength_reduce. */
3138 loop_info->num_mem_sets++;
3140 /* BLKmode MEM means all memory is clobbered. */
3141 if (GET_MODE (x) == BLKmode)
3143 if (RTX_UNCHANGING_P (x))
3144 loop_info->unknown_constant_address_altered = 1;
3145 else
3146 loop_info->unknown_address_altered = 1;
3148 return;
3151 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3152 loop_info->store_mems);
3155 /* X is a value modified by an INSN that references a biv inside a loop
3156 exit test (i.e., X is somehow related to the value of the biv). If X
3157 is a pseudo that is used more than once, then the biv is (effectively)
3158 used more than once. DATA is a pointer to a loop_regs structure. */
3160 static void
3161 note_set_pseudo_multiple_uses (x, y, data)
3162 rtx x;
3163 rtx y ATTRIBUTE_UNUSED;
3164 void *data;
3166 struct loop_regs *regs = (struct loop_regs *) data;
3168 if (x == 0)
3169 return;
3171 while (GET_CODE (x) == STRICT_LOW_PART
3172 || GET_CODE (x) == SIGN_EXTRACT
3173 || GET_CODE (x) == ZERO_EXTRACT
3174 || GET_CODE (x) == SUBREG)
3175 x = XEXP (x, 0);
3177 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3178 return;
3180 /* If we do not have usage information, or if we know the register
3181 is used more than once, note that fact for check_dbra_loop. */
3182 if (REGNO (x) >= max_reg_before_loop
3183 || ! regs->array[REGNO (x)].single_usage
3184 || regs->array[REGNO (x)].single_usage == const0_rtx)
3185 regs->multiple_uses = 1;
3188 /* Return nonzero if the rtx X is invariant over the current loop.
3190 The value is 2 if we refer to something only conditionally invariant.
3192 A memory ref is invariant if it is not volatile and does not conflict
3193 with anything stored in `loop_info->store_mems'. */
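/* For instance, (plus (reg 100) (const_int 4)) is invariant whenever
   pseudo 100 is not set inside the loop, and (mem (symbol_ref X)) is
   invariant provided the MEM is not volatile and no entry on
   loop_info->store_mems may alias it. */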
3195 int
3196 loop_invariant_p (loop, x)
3197 const struct loop *loop;
3198 rtx x;
3200 struct loop_info *loop_info = LOOP_INFO (loop);
3201 struct loop_regs *regs = LOOP_REGS (loop);
3202 int i;
3203 enum rtx_code code;
3204 const char *fmt;
3205 int conditional = 0;
3206 rtx mem_list_entry;
3208 if (x == 0)
3209 return 1;
3210 code = GET_CODE (x);
3211 switch (code)
3213 case CONST_INT:
3214 case CONST_DOUBLE:
3215 case SYMBOL_REF:
3216 case CONST:
3217 return 1;
3219 case LABEL_REF:
3220 /* A LABEL_REF is normally invariant, however, if we are unrolling
3221 loops, and this label is inside the loop, then it isn't invariant.
3222 This is because each unrolled copy of the loop body will have
3223 a copy of this label. If this was invariant, then an insn loading
3224 the address of this label into a register might get moved outside
3225 the loop, and then each loop body would end up using the same label.
3227 We don't know the loop bounds here though, so just fail for all
3228 labels. */
3229 if (flag_unroll_loops)
3230 return 0;
3231 else
3232 return 1;
3234 case PC:
3235 case CC0:
3236 case UNSPEC_VOLATILE:
3237 return 0;
3239 case REG:
3240 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3241 since the reg might be set by initialization within the loop. */
3243 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3244 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3245 && ! current_function_has_nonlocal_goto)
3246 return 1;
3248 if (LOOP_INFO (loop)->has_call
3249 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3250 return 0;
3252 if (regs->array[REGNO (x)].set_in_loop < 0)
3253 return 2;
3255 return regs->array[REGNO (x)].set_in_loop == 0;
3257 case MEM:
3258 /* Volatile memory references must be rejected. Do this before
3259 checking for read-only items, so that volatile read-only items
3260 will be rejected also. */
3261 if (MEM_VOLATILE_P (x))
3262 return 0;
3264 /* See if there is any dependence between a store and this load. */
3265 mem_list_entry = loop_info->store_mems;
3266 while (mem_list_entry)
3268 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3269 x, rtx_varies_p))
3270 return 0;
3272 mem_list_entry = XEXP (mem_list_entry, 1);
3275 /* It's not invalidated by a store in memory
3276 but we must still verify the address is invariant. */
3277 break;
3279 case ASM_OPERANDS:
3280 /* Don't mess with insns declared volatile. */
3281 if (MEM_VOLATILE_P (x))
3282 return 0;
3283 break;
3285 default:
3286 break;
3289 fmt = GET_RTX_FORMAT (code);
3290 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3292 if (fmt[i] == 'e')
3294 int tem = loop_invariant_p (loop, XEXP (x, i));
3295 if (tem == 0)
3296 return 0;
3297 if (tem == 2)
3298 conditional = 1;
3300 else if (fmt[i] == 'E')
3302 int j;
3303 for (j = 0; j < XVECLEN (x, i); j++)
3305 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3306 if (tem == 0)
3307 return 0;
3308 if (tem == 2)
3309 conditional = 1;
3315 return 1 + conditional;
3318 /* Return nonzero if all the insns in the loop that set REG
3319 are INSN and the immediately following insns,
3320 and if each of those insns sets REG in an invariant way
3321 (not counting uses of REG in them).
3323 The value is 2 if some of these insns are only conditionally invariant.
3325 We assume that INSN itself is the first set of REG
3326 and that its source is invariant. */
3328 static int
3329 consec_sets_invariant_p (loop, reg, n_sets, insn)
3330 const struct loop *loop;
3331 int n_sets;
3332 rtx reg, insn;
3334 struct loop_regs *regs = LOOP_REGS (loop);
3335 rtx p = insn;
3336 unsigned int regno = REGNO (reg);
3337 rtx temp;
3338 /* Number of sets we have to insist on finding after INSN. */
3339 int count = n_sets - 1;
3340 int old = regs->array[regno].set_in_loop;
3341 int value = 0;
3342 int this;
3344 /* If N_SETS hit the limit, we can't rely on its value. */
3345 if (n_sets == 127)
3346 return 0;
3348 regs->array[regno].set_in_loop = 0;
3350 while (count > 0)
3352 enum rtx_code code;
3353 rtx set;
3355 p = NEXT_INSN (p);
3356 code = GET_CODE (p);
3358 /* If library call, skip to end of it. */
3359 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3360 p = XEXP (temp, 0);
3362 this = 0;
3363 if (code == INSN
3364 && (set = single_set (p))
3365 && GET_CODE (SET_DEST (set)) == REG
3366 && REGNO (SET_DEST (set)) == regno)
3368 this = loop_invariant_p (loop, SET_SRC (set));
3369 if (this != 0)
3370 value |= this;
3371 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3373 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3374 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3375 notes are OK. */
3376 this = (CONSTANT_P (XEXP (temp, 0))
3377 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3378 && loop_invariant_p (loop, XEXP (temp, 0))));
3379 if (this != 0)
3380 value |= this;
3383 if (this != 0)
3384 count--;
3385 else if (code != NOTE)
3387 regs->array[regno].set_in_loop = old;
3388 return 0;
3392 regs->array[regno].set_in_loop = old;
3393 /* If loop_invariant_p ever returned 2, we return 2. */
3394 return 1 + (value & 2);
3397 #if 0
3398 /* I don't think this condition is sufficient to allow INSN
3399 to be moved, so we no longer test it. */
3401 /* Return 1 if all insns in the basic block of INSN and following INSN
3402 that set REG are invariant according to TABLE. */
3404 static int
3405 all_sets_invariant_p (reg, insn, table)
3406 rtx reg, insn;
3407 short *table;
3409 rtx p = insn;
3410 int regno = REGNO (reg);
3412 while (1)
3414 enum rtx_code code;
3415 p = NEXT_INSN (p);
3416 code = GET_CODE (p);
3417 if (code == CODE_LABEL || code == JUMP_INSN)
3418 return 1;
3419 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3420 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3421 && REGNO (SET_DEST (PATTERN (p))) == regno)
3423 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3424 return 0;
3428 #endif /* 0 */
3430 /* Look at all uses (not sets) of registers in X. For each, if it is
3431 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3432 a different insn, set USAGE[REGNO] to const0_rtx. */
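/* E.g. if pseudo 100 is used only by insn I, single_usage for it ends
   up as I (two uses inside the same insn still count as single); one
   more use in a different insn demotes it to const0_rtx, which then
   sticks for all later uses. */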
3434 static void
3435 find_single_use_in_loop (regs, insn, x)
3436 struct loop_regs *regs;
3437 rtx insn;
3438 rtx x;
3440 enum rtx_code code = GET_CODE (x);
3441 const char *fmt = GET_RTX_FORMAT (code);
3442 int i, j;
3444 if (code == REG)
3445 regs->array[REGNO (x)].single_usage
3446 = (regs->array[REGNO (x)].single_usage != 0
3447 && regs->array[REGNO (x)].single_usage != insn)
3448 ? const0_rtx : insn;
3450 else if (code == SET)
3452 /* Don't count SET_DEST if it is a REG; otherwise count things
3453 in SET_DEST because if a register is partially modified, it won't
3454 show up as a potential movable so we don't care how USAGE is set
3455 for it. */
3456 if (GET_CODE (SET_DEST (x)) != REG)
3457 find_single_use_in_loop (regs, insn, SET_DEST (x));
3458 find_single_use_in_loop (regs, insn, SET_SRC (x));
3460 else
3461 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3463 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3464 find_single_use_in_loop (regs, insn, XEXP (x, i));
3465 else if (fmt[i] == 'E')
3466 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3467 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3471 /* Count and record any set in X which is contained in INSN. Update
3472 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3473 in X. */
3475 static void
3476 count_one_set (regs, insn, x, last_set)
3477 struct loop_regs *regs;
3478 rtx insn, x;
3479 rtx *last_set;
3481 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3482 /* Don't move a reg that has an explicit clobber.
3483 It's not worth the pain to try to do it correctly. */
3484 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3486 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3488 rtx dest = SET_DEST (x);
3489 while (GET_CODE (dest) == SUBREG
3490 || GET_CODE (dest) == ZERO_EXTRACT
3491 || GET_CODE (dest) == SIGN_EXTRACT
3492 || GET_CODE (dest) == STRICT_LOW_PART)
3493 dest = XEXP (dest, 0);
3494 if (GET_CODE (dest) == REG)
3496 int i;
3497 int regno = REGNO (dest);
3498 for (i = 0; i < (int) LOOP_REGNO_NREGS (regno, dest); i++)
3500 /* If this is the first setting of this reg
3501 in current basic block, and it was set before,
3502 it must be set in two basic blocks, so it cannot
3503 be moved out of the loop. */
3504 if (regs->array[regno].set_in_loop > 0
3505 && last_set[regno] == 0)
3506 regs->array[regno+i].may_not_optimize = 1;
3507 /* If this is not first setting in current basic block,
3508 see if the reg was used between the previous one and this one.
3509 If so, neither one can be moved. */
3510 if (last_set[regno] != 0
3511 && reg_used_between_p (dest, last_set[regno], insn))
3512 regs->array[regno+i].may_not_optimize = 1;
3513 if (regs->array[regno+i].set_in_loop < 127)
3514 ++regs->array[regno+i].set_in_loop;
3515 last_set[regno+i] = insn;
3521 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3522 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3523 contained in insn INSN is used by any insn that precedes INSN in
3524 cyclic order starting from the loop entry point.
3526 We don't want to use INSN_LUID here because if we restrict INSN to those
3527 that have a valid INSN_LUID, it means we cannot move an invariant out
3528 from an inner loop past two loops. */
3530 static int
3531 loop_reg_used_before_p (loop, set, insn)
3532 const struct loop *loop;
3533 rtx set, insn;
3535 rtx reg = SET_DEST (set);
3536 rtx p;
3538 /* Scan forward checking for register usage. If we hit INSN, we
3539 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3540 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3542 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3543 return 1;
3545 if (p == loop->end)
3546 p = loop->start;
3549 return 0;
3553 /* Information we collect about arrays that we might want to prefetch. */
3554 struct prefetch_info
3556 struct iv_class *class; /* Class this prefetch is based on. */
3557 struct induction *giv; /* GIV this prefetch is based on. */
3558 rtx base_address; /* Start prefetching from this address plus
3559 index. */
3560 HOST_WIDE_INT index;
3561 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3562 iteration. */
3563 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3564 prefetch area in one iteration. */
3565 unsigned int total_bytes; /* Total bytes loop will access in this block.
3566 This is set only for loops with known
3567 iteration counts and is 0xffffffff
3568 otherwise. */
3569 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3570 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3571 unsigned int write : 1; /* 1 for read/write prefetches. */
3574 /* Data used by check_store function. */
3575 struct check_store_data
3577 rtx mem_address;
3578 int mem_write;
3581 static void check_store PARAMS ((rtx, rtx, void *));
3582 static void emit_prefetch_instructions PARAMS ((struct loop *));
3583 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3585 /* Set mem_write when mem_address is found. Used as callback to
3586 note_stores. */
3587 static void
3588 check_store (x, pat, data)
3589 rtx x, pat ATTRIBUTE_UNUSED;
3590 void *data;
3592 struct check_store_data *d = (struct check_store_data *) data;
3594 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3595 d->mem_write = 1;
3598 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3599 important to get some addresses combined. Later more sophisticated
3600 transformations can be added when necessary.
3602 ??? The same trick of swapping operands is done at several other places.
3603 It would be nice to develop some common way to handle this. */
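/* E.g. (plus (reg 100) (reg 101)) and (plus (reg 101) (reg 100))
   compare equal here, although plain rtx_equal_p would reject the
   pair. */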
3605 static int
3606 rtx_equal_for_prefetch_p (x, y)
3607 rtx x, y;
3609 int i;
3610 int j;
3611 enum rtx_code code = GET_CODE (x);
3612 const char *fmt;
3614 if (x == y)
3615 return 1;
3616 if (code != GET_CODE (y))
3617 return 0;
3619 code = GET_CODE (x);
3621 if (GET_RTX_CLASS (code) == 'c')
3623 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3624 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3625 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3626 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3628 /* Compare the elements. If any pair of corresponding elements fails to
3629 match, return 0 for the whole thing. */
3631 fmt = GET_RTX_FORMAT (code);
3632 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3634 switch (fmt[i])
3636 case 'w':
3637 if (XWINT (x, i) != XWINT (y, i))
3638 return 0;
3639 break;
3641 case 'i':
3642 if (XINT (x, i) != XINT (y, i))
3643 return 0;
3644 break;
3646 case 'E':
3647 /* Two vectors must have the same length. */
3648 if (XVECLEN (x, i) != XVECLEN (y, i))
3649 return 0;
3651 /* And the corresponding elements must match. */
3652 for (j = 0; j < XVECLEN (x, i); j++)
3653 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3654 XVECEXP (y, i, j)) == 0)
3655 return 0;
3656 break;
3658 case 'e':
3659 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3660 return 0;
3661 break;
3663 case 's':
3664 if (strcmp (XSTR (x, i), XSTR (y, i)))
3665 return 0;
3666 break;
3668 case 'u':
3669 /* These are just backpointers, so they don't matter. */
3670 break;
3672 case '0':
3673 break;
3675 /* It is believed that rtx's at this level will never
3676 contain anything but integers and other rtx's,
3677 except for within LABEL_REFs and SYMBOL_REFs. */
3678 default:
3679 abort ();
3682 return 1;
3685 /* Remove constant addition value from the expression X (when present)
3686 and return it. */
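/* E.g. for *X == (plus (reg 100) (const_int 16)) this returns 16 and
   rewrites *X to (reg 100); for (const (plus (symbol_ref Y)
   (const_int 8))) it returns 8 and leaves (symbol_ref Y) behind. */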
3688 static HOST_WIDE_INT
3689 remove_constant_addition (x)
3690 rtx *x;
3692 HOST_WIDE_INT addval = 0;
3693 rtx exp = *x;
3695 /* Avoid clobbering a shared CONST expression. */
3696 if (GET_CODE (exp) == CONST)
3698 if (GET_CODE (XEXP (exp, 0)) == PLUS
3699 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3700 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3702 *x = XEXP (XEXP (exp, 0), 0);
3703 return INTVAL (XEXP (XEXP (exp, 0), 1));
3705 return 0;
3708 if (GET_CODE (exp) == CONST_INT)
3710 addval = INTVAL (exp);
3711 *x = const0_rtx;
3714 /* For a PLUS expression, recurse on both operands. */
3715 else if (GET_CODE (exp) == PLUS)
3717 addval += remove_constant_addition (&XEXP (exp, 0));
3718 addval += remove_constant_addition (&XEXP (exp, 1));
3720 /* If an operand was constant, remove the extra zero from the
3721 expression. */
3722 if (XEXP (exp, 0) == const0_rtx)
3723 *x = XEXP (exp, 1);
3724 else if (XEXP (exp, 1) == const0_rtx)
3725 *x = XEXP (exp, 0);
3728 return addval;
3731 /* Attempt to identify accesses to arrays that are most likely to cause cache
3732 misses, and emit prefetch instructions a few prefetch blocks forward.
3734 To detect the arrays we use the GIV information that was collected by the
3735 strength reduction pass.
3737 The prefetch instructions are generated after the GIV information is done
3738 and before the strength reduction process. The new GIVs are injected into
3739 the strength reduction tables, so the prefetch addresses are optimized as
3740 well.
3742 GIVs are split into base address, stride, and constant addition values.
3743 GIVs with the same address, stride and close addition values are combined
3744 into a single prefetch. Writes through GIVs are also detected, so that
3745 write prefetches can be used for the blocks we write to, on machines
3746 that support them.
3748 Several heuristics are used to determine when to prefetch. They are
3749 controlled by defined symbols that can be overridden for each target. */
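/* To make the decomposition concrete, consider a loop such as

       for (i = 0; i < n; i++)
         sum += a[i];

   where A holds 4-byte elements.  The DEST_ADDR giv for the load of
   a[i] splits into the invariant base address &a[0], a stride of 4
   bytes per iteration, and a constant index of 0.  The array name and
   element size here are only illustrative.  */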
3751 static void
3752 emit_prefetch_instructions (loop)
3753 struct loop *loop;
3755 int num_prefetches = 0;
3756 int num_real_prefetches = 0;
3757 int num_real_write_prefetches = 0;
3758 int num_prefetches_before = 0;
3759 int num_write_prefetches_before = 0;
3760 int ahead = 0;
3761 int i;
3762 struct iv_class *bl;
3763 struct induction *iv;
3764 struct prefetch_info info[MAX_PREFETCHES];
3765 struct loop_ivs *ivs = LOOP_IVS (loop);
3767 if (!HAVE_prefetch)
3768 return;
3770 /* Consider only loops without calls. A loop containing a call is probably
3771 slow enough already to hide the latency of its memory reads. */
3772 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3774 if (loop_dump_stream)
3775 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3777 return;
3780 /* Don't prefetch in loops known to have few iterations. */
3781 if (PREFETCH_NO_LOW_LOOPCNT
3782 && LOOP_INFO (loop)->n_iterations
3783 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3785 if (loop_dump_stream)
3786 fprintf (loop_dump_stream,
3787 "Prefetch: ignoring loop: not enough iterations.\n");
3788 return;
3791 /* Search all induction variables and pick those interesting for the prefetch
3792 machinery. */
3793 for (bl = ivs->list; bl; bl = bl->next)
3795 struct induction *biv = bl->biv, *biv1;
3796 int basestride = 0;
3798 biv1 = biv;
3800 /* Expect all BIVs to be executed in each iteration. This makes our
3801 analysis more conservative. */
3802 while (biv1)
3804 /* Discard non-constant additions that we can't handle well yet, and
3805 BIVs that are executed multiple times; such BIVs ought to be
3806 handled in the nested loop. We accept not_every_iteration BIVs,
3807 since these only result in larger strides and make our
3808 heuristics more conservative. */
3809 if (GET_CODE (biv->add_val) != CONST_INT)
3811 if (loop_dump_stream)
3813 fprintf (loop_dump_stream,
3814 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3815 REGNO (biv->src_reg), INSN_UID (biv->insn));
3816 print_rtl (loop_dump_stream, biv->add_val);
3817 fprintf (loop_dump_stream, "\n");
3819 break;
3822 if (biv->maybe_multiple)
3824 if (loop_dump_stream)
3826 fprintf (loop_dump_stream,
3827 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3828 REGNO (biv->src_reg), INSN_UID (biv->insn));
3829 print_rtl (loop_dump_stream, biv->add_val);
3830 fprintf (loop_dump_stream, "\n");
3832 break;
3835 basestride += INTVAL (biv1->add_val);
3836 biv1 = biv1->next_iv;
3839 if (biv1 || !basestride)
3840 continue;
3842 for (iv = bl->giv; iv; iv = iv->next_iv)
3844 rtx address;
3845 rtx temp;
3846 HOST_WIDE_INT index = 0;
3847 int add = 1;
3848 HOST_WIDE_INT stride = 0;
3849 int stride_sign = 1;
3850 struct check_store_data d;
3851 const char *ignore_reason = NULL;
3852 int size = GET_MODE_SIZE (GET_MODE (iv));
3854 /* See whether an induction variable is interesting to us and if
3855 not, report the reason. */
3856 if (iv->giv_type != DEST_ADDR)
3857 ignore_reason = "giv is not a destination address";
3859 /* We are interested only in constant stride memory references
3860 in order to be able to compute density easily. */
3861 else if (GET_CODE (iv->mult_val) != CONST_INT)
3862 ignore_reason = "stride is not constant";
3864 else
3866 stride = INTVAL (iv->mult_val) * basestride;
3867 if (stride < 0)
3869 stride = -stride;
3870 stride_sign = -1;
3873 /* On some targets, reversed order prefetches are not
3874 worthwhile. */
3875 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3876 ignore_reason = "reversed order stride";
3878 /* Prefetch of accesses with an extreme stride might not be
3879 worthwhile, either. */
3880 else if (PREFETCH_NO_EXTREME_STRIDE
3881 && stride > PREFETCH_EXTREME_STRIDE)
3882 ignore_reason = "extreme stride";
3884 /* Ignore GIVs with varying add values; we can't predict the
3885 value for the next iteration. */
3886 else if (!loop_invariant_p (loop, iv->add_val))
3887 ignore_reason = "giv has varying add value";
3889 /* Ignore GIVs in the nested loops; they ought to have been
3890 handled already. */
3891 else if (iv->maybe_multiple)
3892 ignore_reason = "giv is in nested loop";
3895 if (ignore_reason != NULL)
3897 if (loop_dump_stream)
3898 fprintf (loop_dump_stream,
3899 "Prefetch: ignoring giv at %d: %s.\n",
3900 INSN_UID (iv->insn), ignore_reason);
3901 continue;
3904 /* Determine the pointer to the basic array we are examining. It is
3905 the sum of the BIV's initial value and the GIV's add_val. */
3906 address = copy_rtx (iv->add_val);
3907 temp = copy_rtx (bl->initial_value);
3909 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3910 index = remove_constant_addition (&address);
3912 d.mem_write = 0;
3913 d.mem_address = *iv->location;
3915 /* When the GIV is not always executed, we might be better off by
3916 not dirtying the cache pages. */
3917 if (PREFETCH_CONDITIONAL || iv->always_executed)
3918 note_stores (PATTERN (iv->insn), check_store, &d);
3919 else
3921 if (loop_dump_stream)
3922 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3923 INSN_UID (iv->insn), "in conditional code.");
3924 continue;
3927 /* Attempt to find another prefetch to the same array and see if we
3928 can merge this one. */
3929 for (i = 0; i < num_prefetches; i++)
3930 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3931 && stride == info[i].stride)
3933 /* If both access the same array (the same location,
3934 differing only by a small constant index), merge
3935 the prefetches. Just do the later one; the earlier one will
3936 get prefetched by a previous iteration.
3937 The artificial threshold should not be too small,
3938 but also no bigger than the small portion of memory usually
3939 traversed by a single loop. */
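/* For instance (offsets hypothetical), accesses at byte indexes 0 and 8
   from the same base with the same stride fold into one entry here,
   assuming 8 is below the PREFETCH_EXTREME_DIFFERENCE threshold.  */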
3940 if (index >= info[i].index
3941 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3943 info[i].write |= d.mem_write;
3944 info[i].bytes_accessed += size;
3945 info[i].index = index;
3946 info[i].giv = iv;
3947 info[i].class = bl;
3948 info[num_prefetches].base_address = address;
3949 add = 0;
3950 break;
3953 if (index < info[i].index
3954 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3956 info[i].write |= d.mem_write;
3957 info[i].bytes_accessed += size;
3958 add = 0;
3959 break;
3963 /* Merging failed. */
3964 if (add)
3966 info[num_prefetches].giv = iv;
3967 info[num_prefetches].class = bl;
3968 info[num_prefetches].index = index;
3969 info[num_prefetches].stride = stride;
3970 info[num_prefetches].base_address = address;
3971 info[num_prefetches].write = d.mem_write;
3972 info[num_prefetches].bytes_accessed = size;
3973 num_prefetches++;
3974 if (num_prefetches >= MAX_PREFETCHES)
3976 if (loop_dump_stream)
3977 fprintf (loop_dump_stream,
3978 "Maximal number of prefetches exceeded.\n");
3979 return;
3985 for (i = 0; i < num_prefetches; i++)
3987 int density;
3989 /* Attempt to calculate the total number of bytes fetched by all
3990 iterations of the loop. Avoid overflow. */
3991 if (LOOP_INFO (loop)->n_iterations
3992 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3993 >= LOOP_INFO (loop)->n_iterations))
3994 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3995 else
3996 info[i].total_bytes = 0xffffffff;
3998 density = info[i].bytes_accessed * 100 / info[i].stride;
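/* Worked numbers (illustrative): accessing 4 bytes per iteration with
   a stride of 16 gives DENSITY = 4 * 100 / 16 = 25, i.e. 25% of each
   stride-sized step is actually touched.  The test below rescales this
   against PREFETCH_DENSE_MEM, which is expressed in 256ths.  */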
4000 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4001 if (PREFETCH_ONLY_DENSE_MEM)
4002 if (density * 256 > PREFETCH_DENSE_MEM * 100
4003 && (info[i].total_bytes / PREFETCH_BLOCK
4004 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4006 info[i].prefetch_before_loop = 1;
4007 info[i].prefetch_in_loop
4008 = (info[i].total_bytes / PREFETCH_BLOCK
4009 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4011 else
4013 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4014 if (loop_dump_stream)
4015 fprintf (loop_dump_stream,
4016 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4017 INSN_UID (info[i].giv->insn), density);
4019 else
4020 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4022 /* Find how many prefetch instructions we'll use within the loop. */
4023 if (info[i].prefetch_in_loop != 0)
4025 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4026 / PREFETCH_BLOCK);
4027 num_real_prefetches += info[i].prefetch_in_loop;
4028 if (info[i].write)
4029 num_real_write_prefetches += info[i].prefetch_in_loop;
4033 /* Determine how many iterations ahead to prefetch within the loop, based
4034 on how many prefetches we currently expect to do within the loop. */
4035 if (num_real_prefetches != 0)
4037 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4039 if (loop_dump_stream)
4040 fprintf (loop_dump_stream,
4041 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4042 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4043 num_real_prefetches = 0, num_real_write_prefetches = 0;
4046 /* We'll also use AHEAD to determine how many prefetch instructions to
4047 emit before a loop, so don't leave it zero. */
4048 if (ahead == 0)
4049 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
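/* Quick arithmetic sketch: with the default SIMULTANEOUS_PREFETCHES of 3
   and two prefetch insns in the loop body, AHEAD is 3 / 2 = 1; with four
   prefetch insns the quotient is 0 and in-loop prefetching was abandoned
   above, so AHEAD falls back to PREFETCH_BLOCKS_BEFORE_LOOP_MAX here.  */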
4051 for (i = 0; i < num_prefetches; i++)
4053 /* Update if we've decided not to prefetch anything within the loop. */
4054 if (num_real_prefetches == 0)
4055 info[i].prefetch_in_loop = 0;
4057 /* Find how many prefetch instructions we'll use before the loop. */
4058 if (info[i].prefetch_before_loop != 0)
4060 int n = info[i].total_bytes / PREFETCH_BLOCK;
4061 if (n > ahead)
4062 n = ahead;
4063 info[i].prefetch_before_loop = n;
4064 num_prefetches_before += n;
4065 if (info[i].write)
4066 num_write_prefetches_before += n;
4069 if (loop_dump_stream)
4071 if (info[i].prefetch_in_loop == 0
4072 && info[i].prefetch_before_loop == 0)
4073 continue;
4074 fprintf (loop_dump_stream, "Prefetch insn: %d",
4075 INSN_UID (info[i].giv->insn));
4076 fprintf (loop_dump_stream,
4077 "; in loop: %d; before: %d; %s\n",
4078 info[i].prefetch_in_loop,
4079 info[i].prefetch_before_loop,
4080 info[i].write ? "read/write" : "read only");
4081 fprintf (loop_dump_stream,
4082 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4083 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4084 info[i].bytes_accessed, info[i].total_bytes);
4085 fprintf (loop_dump_stream, " index: ");
4086 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4087 fprintf (loop_dump_stream, "; stride: ");
4088 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4089 fprintf (loop_dump_stream, "; address: ");
4090 print_rtl (loop_dump_stream, info[i].base_address);
4091 fprintf (loop_dump_stream, "\n");
4095 if (num_real_prefetches + num_prefetches_before > 0)
4097 /* Record that this loop uses prefetch instructions. */
4098 LOOP_INFO (loop)->has_prefetch = 1;
4100 if (loop_dump_stream)
4102 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4103 num_real_prefetches, num_real_write_prefetches);
4104 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4105 num_prefetches_before, num_write_prefetches_before);
4109 for (i = 0; i < num_prefetches; i++)
4111 int y;
4113 for (y = 0; y < info[i].prefetch_in_loop; y++)
4115 rtx loc = copy_rtx (*info[i].giv->location);
4116 rtx insn;
4117 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4118 rtx before_insn = info[i].giv->insn;
4119 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4120 rtx seq;
4122 /* We can save some effort by offsetting the address on
4123 architectures with offsettable memory references. */
4124 if (offsettable_address_p (0, VOIDmode, loc))
4125 loc = plus_constant (loc, bytes_ahead);
4126 else
4128 rtx reg = gen_reg_rtx (Pmode);
4129 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4130 GEN_INT (bytes_ahead), reg,
4131 0, before_insn);
4132 loc = reg;
4135 start_sequence ();
4136 /* Make sure the address operand is valid for prefetch. */
4137 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4138 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4139 loc = force_reg (Pmode, loc);
4140 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4141 GEN_INT (3)));
4142 seq = get_insns ();
4143 end_sequence ();
4144 emit_insn_before (seq, before_insn);
4146 /* Check all insns emitted and record the new GIV
4147 information. */
4148 insn = NEXT_INSN (prev_insn);
4149 while (insn != before_insn)
4151 insn = check_insn_for_givs (loop, insn,
4152 info[i].giv->always_executed,
4153 info[i].giv->maybe_multiple);
4154 insn = NEXT_INSN (insn);
4158 if (PREFETCH_BEFORE_LOOP)
4160 /* Emit insns before the loop to fetch the first cache lines or,
4161 if we're not prefetching within the loop, everything we expect
4162 to need. */
4163 for (y = 0; y < info[i].prefetch_before_loop; y++)
4165 rtx reg = gen_reg_rtx (Pmode);
4166 rtx loop_start = loop->start;
4167 rtx init_val = info[i].class->initial_value;
4168 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4169 info[i].giv->add_val,
4170 GEN_INT (y * PREFETCH_BLOCK));
4172 /* Functions called by loop_iv_add_mult_emit_before expect a
4173 non-constant INIT_VAL to have the same mode as REG, which
4174 in this case we know to be Pmode. */
4175 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4176 init_val = convert_to_mode (Pmode, init_val, 0);
4177 loop_iv_add_mult_emit_before (loop, init_val,
4178 info[i].giv->mult_val,
4179 add_val, reg, 0, loop_start);
4180 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4181 GEN_INT (3)),
4182 loop_start);
4187 return;
4190 /* A "basic induction variable" or biv is a pseudo reg that is set
4191 (within this loop) only by incrementing or decrementing it. */
4192 /* A "general induction variable" or giv is a pseudo reg whose
4193 value is a linear function of a biv. */
4195 /* Bivs are recognized by `basic_induction_var';
4196 Givs by `general_induction_var'. */
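/* The classic example: in

       for (i = 0; i < 100; i++)
         a[i] = 0;

   the counter I is a biv, and the address A + 4 * I computed for the
   store (assuming 4-byte elements) is a giv, being a linear function
   of I.  */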
4198 /* Communication with routines called via `note_stores'. */
4200 static rtx note_insn;
4202 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4204 static rtx addr_placeholder;
4206 /* ??? Unfinished optimizations, and possible future optimizations,
4207 for the strength reduction code. */
4209 /* ??? The interaction of biv elimination and the recognition of 'constant'
4210 bivs may cause problems. */
4212 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4213 performance problems.
4215 Perhaps don't eliminate things that can be combined with an addressing
4216 mode. Find all givs that have the same biv, mult_val, and add_val;
4217 then for each giv, check to see if its only use dies in a following
4218 memory address. If so, generate a new memory address and check to see
4219 if it is valid. If it is valid, then store the modified memory address,
4220 otherwise, mark the giv as not done so that it will get its own iv. */
4222 /* ??? Could try to optimize branches when it is known that a biv is always
4223 positive. */
4225 /* ??? When replacing a biv in a compare insn, we should replace it with the
4226 closest giv so that an optimized branch can still be recognized by the
4227 combiner, e.g. the VAX acb insn. */
4229 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4230 was rerun in loop_optimize whenever a register was added or moved.
4231 Also, some of the optimizations could be a little less conservative. */
4233 /* Scan the loop body and call FNCALL for each insn. In addition to the
4234 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4235 the callback.
4237 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4238 least once for every loop iteration except for the last one.
4240 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4241 every loop iteration. */
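/* For instance, loop_bivs_find and loop_givs_find below walk the loop
   this way, passing check_insn_for_bivs and check_insn_for_givs
   respectively as FNCALL.  */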
4243 void
4244 for_each_insn_in_loop (loop, fncall)
4245 struct loop *loop;
4246 loop_insn_callback fncall;
4248 int not_every_iteration = 0;
4249 int maybe_multiple = 0;
4250 int past_loop_latch = 0;
4251 int loop_depth = 0;
4252 rtx p;
4254 /* If loop_scan_start points to the loop exit test, we have to be wary of
4255 subversive use of gotos inside expression statements. */
4256 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4257 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4259 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4260 for (p = next_insn_in_loop (loop, loop->scan_start);
4261 p != NULL_RTX;
4262 p = next_insn_in_loop (loop, p))
4264 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4266 /* Past a CODE_LABEL, we get to insns that may be executed multiple
4267 times. The only way we can be sure that they can't is if every
4268 jump insn between here and the end of the loop either
4269 returns, exits the loop, is a jump to a location that is still
4270 behind the label, or is a jump to the loop start. */
4272 if (GET_CODE (p) == CODE_LABEL)
4274 rtx insn = p;
4276 maybe_multiple = 0;
4278 while (1)
4280 insn = NEXT_INSN (insn);
4281 if (insn == loop->scan_start)
4282 break;
4283 if (insn == loop->end)
4285 if (loop->top != 0)
4286 insn = loop->top;
4287 else
4288 break;
4289 if (insn == loop->scan_start)
4290 break;
4293 if (GET_CODE (insn) == JUMP_INSN
4294 && GET_CODE (PATTERN (insn)) != RETURN
4295 && (!any_condjump_p (insn)
4296 || (JUMP_LABEL (insn) != 0
4297 && JUMP_LABEL (insn) != loop->scan_start
4298 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4300 maybe_multiple = 1;
4301 break;
4306 /* Past a jump, we get to insns for which we can't count
4307 on whether they will be executed during each iteration. */
4308 /* This code appears twice in strength_reduce. There is also similar
4309 code in scan_loop. */
4310 if (GET_CODE (p) == JUMP_INSN
4311 /* If we enter the loop in the middle, and scan around to the
4312 beginning, don't set not_every_iteration for that.
4313 This can be any kind of jump, since we want to know if insns
4314 will be executed if the loop is executed. */
4315 && !(JUMP_LABEL (p) == loop->top
4316 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4317 && any_uncondjump_p (p))
4318 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4320 rtx label = 0;
4322 /* If this is a jump outside the loop, then it also doesn't
4323 matter. Check to see if the target of this branch is on the
4324 loop->exit_labels list. */
4326 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4327 if (XEXP (label, 0) == JUMP_LABEL (p))
4328 break;
4330 if (!label)
4331 not_every_iteration = 1;
4334 else if (GET_CODE (p) == NOTE)
4336 /* At the virtual top of a converted loop, insns are again known to
4337 be executed each iteration: logically, the loop begins here
4338 even though the exit code has been duplicated.
4340 Insns are also again known to be executed each iteration at
4341 the LOOP_CONT note. */
4342 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4343 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4344 && loop_depth == 0)
4345 not_every_iteration = 0;
4346 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4347 loop_depth++;
4348 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4349 loop_depth--;
4352 /* Note if we pass a loop latch. If we do, then we cannot clear
4353 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4354 a loop since a jump before the last CODE_LABEL may have started
4355 a new loop iteration.
4357 Note that LOOP_TOP is only set for rotated loops and we need
4358 this check for all loops, so compare against the CODE_LABEL
4359 which immediately follows LOOP_START. */
4360 if (GET_CODE (p) == JUMP_INSN
4361 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4362 past_loop_latch = 1;
4364 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4365 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4366 or not an insn is known to be executed each iteration of the
4367 loop, whether or not any iterations are known to occur.
4369 Therefore, if we have just passed a label and have no more labels
4370 between here and the test insn of the loop, and we have not passed
4371 a jump to the top of the loop, then we know these insns will be
4372 executed each iteration. */
4374 if (not_every_iteration
4375 && !past_loop_latch
4376 && GET_CODE (p) == CODE_LABEL
4377 && no_labels_between_p (p, loop->end)
4378 && loop_insn_first_p (p, loop->cont))
4379 not_every_iteration = 0;
4383 static void
4384 loop_bivs_find (loop)
4385 struct loop *loop;
4387 struct loop_regs *regs = LOOP_REGS (loop);
4388 struct loop_ivs *ivs = LOOP_IVS (loop);
4389 /* Temporary list pointers for traversing ivs->list. */
4390 struct iv_class *bl, **backbl;
4392 ivs->list = 0;
4394 for_each_insn_in_loop (loop, check_insn_for_bivs);
4396 /* Scan ivs->list to remove all regs that proved not to be bivs.
4397 Make a sanity check against regs->n_times_set. */
4398 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4400 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4401 /* Above happens if register modified by subreg, etc. */
4402 /* Make sure it is not recognized as a basic induction var: */
4403 || regs->array[bl->regno].n_times_set != bl->biv_count
4404 /* If never incremented, it is invariant that we decided not to
4405 move. So leave it alone. */
4406 || ! bl->incremented)
4408 if (loop_dump_stream)
4409 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4410 bl->regno,
4411 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4412 ? "not induction variable"
4413 : (! bl->incremented ? "never incremented"
4414 : "count error")));
4416 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4417 *backbl = bl->next;
4419 else
4421 backbl = &bl->next;
4423 if (loop_dump_stream)
4424 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4430 /* Determine how BIVs are initialized by looking through the pre-header
4431 extended basic block. */
4432 static void
4433 loop_bivs_init_find (loop)
4434 struct loop *loop;
4436 struct loop_ivs *ivs = LOOP_IVS (loop);
4437 /* Temporary list pointers for traversing ivs->list. */
4438 struct iv_class *bl;
4439 int call_seen;
4440 rtx p;
4442 /* Find initial value for each biv by searching backwards from loop_start,
4443 halting at first label. Also record any test condition. */
4445 call_seen = 0;
4446 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4448 rtx test;
4450 note_insn = p;
4452 if (GET_CODE (p) == CALL_INSN)
4453 call_seen = 1;
4455 if (INSN_P (p))
4456 note_stores (PATTERN (p), record_initial, ivs);
4458 /* Record any test of a biv that branches around the loop if no store
4459 between it and the start of loop. We only care about tests with
4460 constants and registers and only certain of those. */
4461 if (GET_CODE (p) == JUMP_INSN
4462 && JUMP_LABEL (p) != 0
4463 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4464 && (test = get_condition_for_loop (loop, p)) != 0
4465 && GET_CODE (XEXP (test, 0)) == REG
4466 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4467 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4468 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4469 && bl->init_insn == 0)
4471 /* If an NE test, we have an initial value! */
4472 if (GET_CODE (test) == NE)
4474 bl->init_insn = p;
4475 bl->init_set = gen_rtx_SET (VOIDmode,
4476 XEXP (test, 0), XEXP (test, 1));
4478 else
4479 bl->initial_test = test;
4485 /* Look at each biv and see if we can say anything better about its
4486 initial value from any initializing insns set up above. (This is done
4487 in two passes to avoid missing SETs in a PARALLEL.) */
4488 static void
4489 loop_bivs_check (loop)
4490 struct loop *loop;
4492 struct loop_ivs *ivs = LOOP_IVS (loop);
4493 /* Temporary list pointers for traversing ivs->list. */
4494 struct iv_class *bl;
4495 struct iv_class **backbl;
4497 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4499 rtx src;
4500 rtx note;
4502 if (! bl->init_insn)
4503 continue;
4505 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4506 is a constant, use the value of that. */
4507 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4508 && CONSTANT_P (XEXP (note, 0)))
4509 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4510 && CONSTANT_P (XEXP (note, 0))))
4511 src = XEXP (note, 0);
4512 else
4513 src = SET_SRC (bl->init_set);
4515 if (loop_dump_stream)
4516 fprintf (loop_dump_stream,
4517 "Biv %d: initialized at insn %d: initial value ",
4518 bl->regno, INSN_UID (bl->init_insn));
4520 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4521 || GET_MODE (src) == VOIDmode)
4522 && valid_initial_value_p (src, bl->init_insn,
4523 LOOP_INFO (loop)->pre_header_has_call,
4524 loop->start))
4526 bl->initial_value = src;
4528 if (loop_dump_stream)
4530 print_simple_rtl (loop_dump_stream, src);
4531 fputc ('\n', loop_dump_stream);
4534 /* If we can't make it a giv,
4535 let the biv keep its initial value of "itself". */
4536 else if (loop_dump_stream)
4537 fprintf (loop_dump_stream, "is complex\n");
4542 /* Search the loop for general induction variables. */
4544 static void
4545 loop_givs_find (loop)
4546 struct loop* loop;
4548 for_each_insn_in_loop (loop, check_insn_for_givs);
4552 /* For each giv for which we still don't know whether or not it is
4553 replaceable, check to see if it is replaceable because its final value
4554 can be calculated. */
4556 static void
4557 loop_givs_check (loop)
4558 struct loop *loop;
4560 struct loop_ivs *ivs = LOOP_IVS (loop);
4561 struct iv_class *bl;
4563 for (bl = ivs->list; bl; bl = bl->next)
4565 struct induction *v;
4567 for (v = bl->giv; v; v = v->next_iv)
4568 if (! v->replaceable && ! v->not_replaceable)
4569 check_final_value (loop, v);
4574 /* Return non-zero if it is possible to eliminate the biv BL provided
4575 all givs are reduced. This is possible if either the reg is not
4576 used outside the loop, or we can compute what its final value will
4577 be. */
4579 static int
4580 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4581 struct loop *loop;
4582 struct iv_class *bl;
4583 int threshold;
4584 int insn_count;
4586 /* For architectures with a decrement_and_branch_until_zero insn,
4587 don't do this if we put a REG_NONNEG note on the endtest for this
4588 biv. */
4590 #ifdef HAVE_decrement_and_branch_until_zero
4591 if (bl->nonneg)
4593 if (loop_dump_stream)
4594 fprintf (loop_dump_stream,
4595 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4596 return 0;
4598 #endif
4600 /* Check that biv is used outside loop or if it has a final value.
4601 Compare against bl->init_insn rather than loop->start. We aren't
4602 concerned with any uses of the biv between init_insn and
4603 loop->start since these won't be affected by the value of the biv
4604 elsewhere in the function, so long as init_insn doesn't use the
4605 biv itself. */
4607 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4608 && bl->init_insn
4609 && INSN_UID (bl->init_insn) < max_uid_for_loop
4610 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4611 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4612 || (bl->final_value = final_biv_value (loop, bl)))
4613 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4615 if (loop_dump_stream)
4617 fprintf (loop_dump_stream,
4618 "Cannot eliminate biv %d.\n",
4619 bl->regno);
4620 fprintf (loop_dump_stream,
4621 "First use: insn %d, last use: insn %d.\n",
4622 REGNO_FIRST_UID (bl->regno),
4623 REGNO_LAST_UID (bl->regno));
4625 return 0;
4629 /* Reduce each giv of BL that we have decided to reduce. */
4631 static void
4632 loop_givs_reduce (loop, bl)
4633 struct loop *loop;
4634 struct iv_class *bl;
4636 struct induction *v;
4638 for (v = bl->giv; v; v = v->next_iv)
4640 struct induction *tv;
4641 if (! v->ignore && v->same == 0)
4643 int auto_inc_opt = 0;
4645 /* If the code for derived givs immediately below has already
4646 allocated a new_reg, we must keep it. */
4647 if (! v->new_reg)
4648 v->new_reg = gen_reg_rtx (v->mode);
4650 #ifdef AUTO_INC_DEC
4651 /* If the target has auto-increment addressing modes, and
4652 this is an address giv, then try to put the increment
4653 immediately after its use, so that flow can create an
4654 auto-increment addressing mode. */
4655 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4656 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4657 /* We don't handle reversed biv's because bl->biv->insn
4658 does not have a valid INSN_LUID. */
4659 && ! bl->reversed
4660 && v->always_executed && ! v->maybe_multiple
4661 && INSN_UID (v->insn) < max_uid_for_loop)
4663 /* If other giv's have been combined with this one, then
4664 this will work only if all uses of the other giv's occur
4665 before this giv's insn. This is difficult to check.
4667 We simplify this by looking for the common case where
4668 there is one DEST_REG giv, and this giv's insn is the
4669 last use of the dest_reg of that DEST_REG giv. If the
4670 increment occurs after the address giv, then we can
4671 perform the optimization. (Otherwise, the increment
4672 would have to go before other_giv, and we would not be
4673 able to combine it with the address giv to get an
4674 auto-inc address.) */
4675 if (v->combined_with)
4677 struct induction *other_giv = 0;
4679 for (tv = bl->giv; tv; tv = tv->next_iv)
4680 if (tv->same == v)
4682 if (other_giv)
4683 break;
4684 else
4685 other_giv = tv;
4687 if (! tv && other_giv
4688 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4689 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4690 == INSN_UID (v->insn))
4691 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4692 auto_inc_opt = 1;
4694 /* Check for case where increment is before the address
4695 giv. Do this test in "loop order". */
4696 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4697 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4698 || (INSN_LUID (bl->biv->insn)
4699 > INSN_LUID (loop->scan_start))))
4700 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4701 && (INSN_LUID (loop->scan_start)
4702 < INSN_LUID (bl->biv->insn))))
4703 auto_inc_opt = -1;
4704 else
4705 auto_inc_opt = 1;
4707 #ifdef HAVE_cc0
4709 rtx prev;
4711 /* We can't put an insn immediately after one setting
4712 cc0, or immediately before one using cc0. */
4713 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4714 || (auto_inc_opt == -1
4715 && (prev = prev_nonnote_insn (v->insn)) != 0
4716 && INSN_P (prev)
4717 && sets_cc0_p (PATTERN (prev))))
4718 auto_inc_opt = 0;
4720 #endif
4722 if (auto_inc_opt)
4723 v->auto_inc_opt = 1;
4725 #endif
4727 /* For each place where the biv is incremented, add an insn
4728 to increment the new, reduced reg for the giv. */
4729 for (tv = bl->biv; tv; tv = tv->next_iv)
4731 rtx insert_before;
4733 if (! auto_inc_opt)
4734 insert_before = tv->insn;
4735 else if (auto_inc_opt == 1)
4736 insert_before = NEXT_INSN (v->insn);
4737 else
4738 insert_before = v->insn;
4740 if (tv->mult_val == const1_rtx)
4741 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4742 v->new_reg, v->new_reg,
4743 0, insert_before);
4744 else /* tv->mult_val == const0_rtx */
4745 /* A multiply is acceptable here
4746 since this is presumed to be seldom executed. */
4747 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4748 v->add_val, v->new_reg,
4749 0, insert_before);
4752 /* Add code at loop start to initialize giv's reduced reg. */
4754 loop_iv_add_mult_hoist (loop,
4755 extend_value_for_giv (v, bl->initial_value),
4756 v->mult_val, v->add_val, v->new_reg);
4762 /* Check for givs whose first use is their definition and whose
4763 last use is the definition of another giv. If so, it is likely
4764 dead and should not be used to derive another giv nor to
4765 eliminate a biv. */
4767 static void
4768 loop_givs_dead_check (loop, bl)
4769 struct loop *loop ATTRIBUTE_UNUSED;
4770 struct iv_class *bl;
4772 struct induction *v;
4774 for (v = bl->giv; v; v = v->next_iv)
4776 if (v->ignore
4777 || (v->same && v->same->ignore))
4778 continue;
4780 if (v->giv_type == DEST_REG
4781 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4783 struct induction *v1;
4785 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4786 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4787 v->maybe_dead = 1;
4793 static void
4794 loop_givs_rescan (loop, bl, reg_map)
4795 struct loop *loop;
4796 struct iv_class *bl;
4797 rtx *reg_map;
4799 struct induction *v;
4801 for (v = bl->giv; v; v = v->next_iv)
4803 if (v->same && v->same->ignore)
4804 v->ignore = 1;
4806 if (v->ignore)
4807 continue;
4809 /* Update expression if this was combined, in case other giv was
4810 replaced. */
4811 if (v->same)
4812 v->new_reg = replace_rtx (v->new_reg,
4813 v->same->dest_reg, v->same->new_reg);
4815 /* See if this register is known to be a pointer to something. If
4816 so, see if we can find the alignment. First see if there is a
4817 destination register that is a pointer. If so, this shares the
4818 alignment too. Next see if we can deduce anything from the
4819 computational information. If not, and this is a DEST_ADDR
4820 giv, at least we know that it's a pointer, though we don't know
4821 the alignment. */
4822 if (GET_CODE (v->new_reg) == REG
4823 && v->giv_type == DEST_REG
4824 && REG_POINTER (v->dest_reg))
4825 mark_reg_pointer (v->new_reg,
4826 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4827 else if (GET_CODE (v->new_reg) == REG
4828 && REG_POINTER (v->src_reg))
4830 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4832 if (align == 0
4833 || GET_CODE (v->add_val) != CONST_INT
4834 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4835 align = 0;
4837 mark_reg_pointer (v->new_reg, align);
4839 else if (GET_CODE (v->new_reg) == REG
4840 && GET_CODE (v->add_val) == REG
4841 && REG_POINTER (v->add_val))
4843 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4845 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4846 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4847 align = 0;
4849 mark_reg_pointer (v->new_reg, align);
4851 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4852 mark_reg_pointer (v->new_reg, 0);
4854 if (v->giv_type == DEST_ADDR)
4855 /* Store reduced reg as the address in the memref where we found
4856 this giv. */
4857 validate_change (v->insn, v->location, v->new_reg, 0);
4858 else if (v->replaceable)
4860 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4862 else
4864 rtx original_insn = v->insn;
4865 rtx note;
4867 /* Not replaceable; emit an insn to set the original giv reg from
4868 the reduced giv, same as above. */
4869 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4870 gen_move_insn (v->dest_reg,
4871 v->new_reg));
4873 /* The original insn may have a REG_EQUAL note. This note is
4874 now incorrect and may result in invalid substitutions later.
4875 The original insn is dead, but may be part of a libcall
4876 sequence, which doesn't seem worth the bother of handling. */
4877 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4878 if (note)
4879 remove_note (original_insn, note);
4882 /* When a loop is reversed, givs which depend on the reversed
4883 biv, and which are live outside the loop, must be set to their
4884 correct final value. This insn is only needed if the giv is
4885 not replaceable. The correct final value is the same as the
4886 value that the giv starts the reversed loop with. */
4887 if (bl->reversed && ! v->replaceable)
4888 loop_iv_add_mult_sink (loop,
4889 extend_value_for_giv (v, bl->initial_value),
4890 v->mult_val, v->add_val, v->dest_reg);
4891 else if (v->final_value)
4892 loop_insn_sink_or_swim (loop,
4893 gen_load_of_final_value (v->dest_reg,
4894 v->final_value));
4896 if (loop_dump_stream)
4898 fprintf (loop_dump_stream, "giv at %d reduced to ",
4899 INSN_UID (v->insn));
4900 print_simple_rtl (loop_dump_stream, v->new_reg);
4901 fprintf (loop_dump_stream, "\n");
4907 static int
4908 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4909 struct loop *loop ATTRIBUTE_UNUSED;
4910 struct iv_class *bl;
4911 struct induction *v;
4912 rtx test_reg;
4914 int add_cost;
4915 int benefit;
4917 benefit = v->benefit;
4918 PUT_MODE (test_reg, v->mode);
4919 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4920 test_reg, test_reg);
4922 /* Reduce benefit if not replaceable, since we will insert a
4923 move-insn to replace the insn that calculates this giv. Don't do
4924 this unless the giv is a user variable, since it will often be
4925 marked non-replaceable because of the duplication of the exit
4926 code outside the loop. In such a case, the copies we insert are
4927 dead and will be deleted. So they don't have a cost. Similar
4928 situations exist. */
4929 /* ??? The new final_[bg]iv_value code does a much better job of
4930 finding replaceable giv's, and hence this code may no longer be
4931 necessary. */
4932 if (! v->replaceable && ! bl->eliminable
4933 && REG_USERVAR_P (v->dest_reg))
4934 benefit -= copy_cost;
4936 /* Decrease the benefit to count the add-insns that we will insert
4937 to increment the reduced reg for the giv. ??? This can
4938 overestimate the run-time cost of the additional insns, e.g. if
4939 there are multiple basic blocks that increment the biv, but only
4940 one of these blocks is executed during each iteration. There is
4941 no good way to detect cases like this with the current structure
4942 of the loop optimizer. This code is more accurate for
4943 determining code size than run-time benefits. */
4944 benefit -= add_cost * bl->biv_count;
4946 /* Decide whether to strength-reduce this giv or to leave the code
4947 unchanged (recompute it from the biv each time it is used). This
4948 decision can be made independently for each giv. */
4950 #ifdef AUTO_INC_DEC
4951 /* Attempt to guess whether autoincrement will handle some of the
4952 new add insns; if so, increase BENEFIT (undo the subtraction of
4953 add_cost that was done above). */
4954 if (v->giv_type == DEST_ADDR
4955 /* Increasing the benefit is risky, since this is only a guess.
4956 Avoid increasing register pressure in cases where there would
4957 be no other benefit from reducing this giv. */
4958 && benefit > 0
4959 && GET_CODE (v->mult_val) == CONST_INT)
4961 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4963 if (HAVE_POST_INCREMENT
4964 && INTVAL (v->mult_val) == size)
4965 benefit += add_cost * bl->biv_count;
4966 else if (HAVE_PRE_INCREMENT
4967 && INTVAL (v->mult_val) == size)
4968 benefit += add_cost * bl->biv_count;
4969 else if (HAVE_POST_DECREMENT
4970 && -INTVAL (v->mult_val) == size)
4971 benefit += add_cost * bl->biv_count;
4972 else if (HAVE_PRE_DECREMENT
4973 && -INTVAL (v->mult_val) == size)
4974 benefit += add_cost * bl->biv_count;
4976 #endif
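  /* For example, a DEST_ADDR giv stepping 4 bytes per iteration through
     an array of 4-byte elements on a target with HAVE_POST_INCREMENT
     gets the add_cost credited back above, on the guess that the
     increment will fold into a post-increment address.  */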
4978 return benefit;
4982 /* Free IV structures for LOOP. */
4984 static void
4985 loop_ivs_free (loop)
4986 struct loop *loop;
4988 struct loop_ivs *ivs = LOOP_IVS (loop);
4989 struct iv_class *iv = ivs->list;
4991 free (ivs->regs);
4993 while (iv)
4995 struct iv_class *next = iv->next;
4996 struct induction *induction;
4997 struct induction *next_induction;
4999 for (induction = iv->biv; induction; induction = next_induction)
5001 next_induction = induction->next_iv;
5002 free (induction);
5004 for (induction = iv->giv; induction; induction = next_induction)
5006 next_induction = induction->next_iv;
5007 free (induction);
5010 free (iv);
5011 iv = next;
5016 /* Perform strength reduction and induction variable elimination.
5018 Pseudo registers created during this function will be beyond the
5019 last valid index in several tables including
5020 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5021 problem here, because the added registers cannot be givs outside of
5022 their loop, and hence will never be reconsidered. But scan_loop
5023 must check regnos to make sure they are in bounds. */
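/* As a sketch of the overall effect, a giv computing I * 4 + BASE for
   an address is given its own register that is simply incremented by 4
   at each biv increment; if the biv I then survives only in the exit
   test, maybe_eliminate_biv can rewrite that test in terms of the
   reduced register and the biv disappears altogether.  */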
5025 static void
5026 strength_reduce (loop, flags)
5027 struct loop *loop;
5028 int flags;
5030 struct loop_info *loop_info = LOOP_INFO (loop);
5031 struct loop_regs *regs = LOOP_REGS (loop);
5032 struct loop_ivs *ivs = LOOP_IVS (loop);
5033 rtx p;
5034 /* Temporary list pointer for traversing ivs->list. */
5035 struct iv_class *bl;
5036 /* Ratio of extra register life span we can justify
5037 for saving an instruction. More if loop doesn't call subroutines
5038 since in that case saving an insn makes more difference
5039 and more registers are available. */
5040 /* ??? Could set this to the last value of threshold in move_movables. */
5041 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5042 /* Map of pseudo-register replacements. */
5043 rtx *reg_map = NULL;
5044 int reg_map_size;
5045 int unrolled_insn_copies = 0;
5046 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5047 int insn_count = count_insns_in_loop (loop);
5049 addr_placeholder = gen_reg_rtx (Pmode);
5051 ivs->n_regs = max_reg_before_loop;
5052 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5054 /* Find all BIVs in loop. */
5055 loop_bivs_find (loop);
5057 /* Exit if there are no bivs. */
5058 if (! ivs->list)
5060 /* We can still unroll the loop anyway, but indicate that there is no
5061 strength reduction info available. */
5062 if (flags & LOOP_UNROLL)
5063 unroll_loop (loop, insn_count, 0);
5065 loop_ivs_free (loop);
5066 return;
5069 /* Determine how BIVs are initialized by looking through the pre-header
5070 extended basic block. */
5071 loop_bivs_init_find (loop);
5073 /* Look at each biv and see if we can say anything better about its
5074 initial value from any initializing insns set up above. */
5075 loop_bivs_check (loop);
5077 /* Search the loop for general induction variables. */
5078 loop_givs_find (loop);
5080 /* Try to calculate and save the number of loop iterations. This is
5081 set to zero if the actual number cannot be calculated. This must
5082 be called after all giv's have been identified, since otherwise it may
5083 fail if the iteration variable is a giv. */
5084 loop_iterations (loop);
5086 #ifdef HAVE_prefetch
5087 if (flags & LOOP_PREFETCH)
5088 emit_prefetch_instructions (loop);
5089 #endif
5091 /* Now for each giv for which we still don't know whether or not it is
5092 replaceable, check to see if it is replaceable because its final value
5093 can be calculated. This must be done after loop_iterations is called,
5094 so that final_giv_value will work correctly. */
5095 loop_givs_check (loop);
5097 /* Try to prove that the loop counter variable (if any) is always
5098 nonnegative; if so, record that fact with a REG_NONNEG note
5099 so that "decrement and branch until zero" insn can be used. */
5100 check_dbra_loop (loop, insn_count);
5102 /* Create reg_map to hold substitutions for replaceable giv regs.
5103 Some givs might have been made from biv increments, so look at
5104 ivs->reg_iv_type for a suitable size. */
5105 reg_map_size = ivs->n_regs;
5106 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5108 /* Examine each iv class for feasibility of strength reduction/induction
5109 variable elimination. */
5111 for (bl = ivs->list; bl; bl = bl->next)
5113 struct induction *v;
5114 int benefit;
5116 /* Test whether it will be possible to eliminate this biv
5117 provided all givs are reduced. */
5118 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5120 /* This will be true at the end, if all givs which depend on this
5121 biv have been strength reduced.
5122 We can't (currently) eliminate the biv unless this is so. */
5123 bl->all_reduced = 1;
5125 /* Check each extension dependent giv in this class to see if its
5126 root biv is safe from wrapping in the interior mode. */
5127 check_ext_dependent_givs (bl, loop_info);
5129 /* Combine all giv's for this iv_class. */
5130 combine_givs (regs, bl);
5132 for (v = bl->giv; v; v = v->next_iv)
5134 struct induction *tv;
5136 if (v->ignore || v->same)
5137 continue;
5139 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5141 /* If an insn is not to be strength reduced, then set its ignore
5142 flag, and clear bl->all_reduced. */
5144 /* A giv that depends on a reversed biv must be reduced if it is
5145 used after the loop exit, otherwise, it would have the wrong
5146 value after the loop exit. To make it simple, just reduce all
5147 such giv's, whether or not we know they are used after the loop
5148 exit. */
5150 if (! flag_reduce_all_givs
5151 && v->lifetime * threshold * benefit < insn_count
5152 && ! bl->reversed)
5154 if (loop_dump_stream)
5155 fprintf (loop_dump_stream,
5156 "giv of insn %d not worth while, %d vs %d.\n",
5157 INSN_UID (v->insn),
5158 v->lifetime * threshold * benefit, insn_count);
5159 v->ignore = 1;
5160 bl->all_reduced = 0;
5162 else
5164 /* Check that we can increment the reduced giv without a
5165 multiply insn. If not, reject it. */
5167 for (tv = bl->biv; tv; tv = tv->next_iv)
5168 if (tv->mult_val == const1_rtx
5169 && ! product_cheap_p (tv->add_val, v->mult_val))
5171 if (loop_dump_stream)
5172 fprintf (loop_dump_stream,
5173 "giv of insn %d: would need a multiply.\n",
5174 INSN_UID (v->insn));
5175 v->ignore = 1;
5176 bl->all_reduced = 0;
5177 break;
5182 /* Check for givs whose first use is their definition and whose
5183 last use is the definition of another giv. If so, it is likely
5184 dead and should not be used to derive another giv nor to
5185 eliminate a biv. */
5186 loop_givs_dead_check (loop, bl);
5188 /* Reduce each giv that we decided to reduce. */
5189 loop_givs_reduce (loop, bl);
5191 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5192 as not reduced.
5194 For each giv register that can be reduced now: if replaceable,
5195 substitute reduced reg wherever the old giv occurs;
5196 else add new move insn "giv_reg = reduced_reg". */
5197 loop_givs_rescan (loop, bl, reg_map);
5199 /* All the givs based on the biv bl have been reduced if they
5200 merit it. */
5202 /* For each giv not marked as maybe dead that has been combined with a
5203 second giv, clear any "maybe dead" mark on that second giv.
5204 v->new_reg will either be or refer to the register of the giv it
5205 combined with.
5207 Doing this clearing avoids problems in biv elimination where
5208 a giv's new_reg is a complex value that can't be put in the
5209 insn but the giv combined with (with a reg as new_reg) is
5210 marked maybe_dead. Since the register will be used in either
5211 case, we'd prefer it be used from the simpler giv. */
5213 for (v = bl->giv; v; v = v->next_iv)
5214 if (! v->maybe_dead && v->same)
5215 v->same->maybe_dead = 0;
5217 /* Try to eliminate the biv, if it is a candidate.
5218 This won't work if ! bl->all_reduced,
5219 since the givs we planned to use might not have been reduced.
5221 We have to be careful that we didn't initially think we could
5222 eliminate this biv because of a giv that we now think may be
5223 dead and shouldn't be used as a biv replacement.
5225 Also, there is the possibility that we may have a giv that looks
5226 like it can be used to eliminate a biv, but the resulting insn
5227 isn't valid. This can happen, for example, on the 88k, where a
5228 JUMP_INSN can compare a register only with zero. Attempts to
5229 replace it with a compare with a constant will fail.
5231 Note that in cases where this call fails, we may have replaced some
5232 of the occurrences of the biv with a giv, but no harm was done in
5233 doing so in the rare cases where it can occur. */
5235 if (bl->all_reduced == 1 && bl->eliminable
5236 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5238 /* ?? If we created a new test to bypass the loop entirely,
5239 or otherwise drop straight in, based on this test, then
5240 we might want to rewrite it also. This way some later
5241 pass has more hope of removing the initialization of this
5242 biv entirely. */
5244 /* If final_value != 0, then the biv may be used after loop end
5245 and we must emit an insn to set it just in case.
5247 Reversed bivs already have an insn after the loop setting their
5248 value, so we don't need another one. We can't calculate the
5249 proper final value for such a biv here anyway. */
5250 if (bl->final_value && ! bl->reversed)
5251 loop_insn_sink_or_swim (loop,
5252 gen_load_of_final_value (bl->biv->dest_reg,
5253 bl->final_value));
5255 if (loop_dump_stream)
5256 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5257 bl->regno);
5259 /* See above note wrt final_value. But since we couldn't eliminate
5260 the biv, we must set the value after the loop instead of before. */
5261 else if (bl->final_value && ! bl->reversed)
5262 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5263 bl->final_value));
5266 /* Go through all the instructions in the loop, making all the
5267 register substitutions scheduled in REG_MAP. */
5269 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5270 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5271 || GET_CODE (p) == CALL_INSN)
5273 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5274 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5275 INSN_CODE (p) = -1;
5278 if (loop_info->n_iterations > 0)
5280 /* When we completely unroll a loop we will likely not need the increment
5281 of the loop BIV and we will not need the conditional branch at the
5282 end of the loop. */
5283 unrolled_insn_copies = insn_count - 2;
5285 #ifdef HAVE_cc0
5286 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5287 need the comparison before the conditional branch at the end of the
5288 loop. */
5289 unrolled_insn_copies -= 1;
5290 #endif
5292 /* We'll need one copy for each loop iteration. */
5293 unrolled_insn_copies *= loop_info->n_iterations;
5295 /* A little slop to account for the ability to remove initialization
5296 code, better CSE, and other secondary benefits of completely
5297 unrolling some loops. */
5298 unrolled_insn_copies -= 1;
5300 /* Clamp the value. */
5301 if (unrolled_insn_copies < 0)
5302 unrolled_insn_copies = 0;
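/* Worked numbers (illustrative): a 10-insn loop body known to iterate
   4 times yields (10 - 2) * 4 - 1 = 31 copies, or (10 - 3) * 4 - 1 = 27
   on a HAVE_cc0 machine; this is what gets compared against INSN_COUNT
   below when deciding whether complete unrolling pays off.  */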
5305 /* Unroll loops from within strength reduction so that we can use the
5306 induction variable information that strength_reduce has already
5307 collected. Always unroll loops that would be as small or smaller
5308 unrolled than when rolled. */
5309 if ((flags & LOOP_UNROLL)
5310 || ((flags & LOOP_AUTO_UNROLL)
5311 && loop_info->n_iterations > 0
5312 && unrolled_insn_copies <= insn_count))
5313 unroll_loop (loop, insn_count, 1);
5315 #ifdef HAVE_doloop_end
5316 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5317 doloop_optimize (loop);
5318 #endif /* HAVE_doloop_end */
5320 /* In case the number of iterations is known, put a branch prediction note
5321 on the branch. Do that only in the second loop pass, as loop unrolling
5322 may change the number of iterations performed. */
5323 if (flags & LOOP_BCT)
5325 unsigned HOST_WIDE_INT n
5326 = loop_info->n_iterations / loop_info->unroll_number;
5327 if (n > 1)
5328 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5329 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5332 if (loop_dump_stream)
5333 fprintf (loop_dump_stream, "\n");
5335 loop_ivs_free (loop);
5336 if (reg_map)
5337 free (reg_map);
5341 /* Record all basic induction variables calculated in the insn. */
5341 static rtx
5342 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5343 struct loop *loop;
5344 rtx p;
5345 int not_every_iteration;
5346 int maybe_multiple;
5348 struct loop_ivs *ivs = LOOP_IVS (loop);
5349 rtx set;
5350 rtx dest_reg;
5351 rtx inc_val;
5352 rtx mult_val;
5353 rtx *location;
5355 if (GET_CODE (p) == INSN
5356 && (set = single_set (p))
5357 && GET_CODE (SET_DEST (set)) == REG)
5359 dest_reg = SET_DEST (set);
5360 if (REGNO (dest_reg) < max_reg_before_loop
5361 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5362 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5364 if (basic_induction_var (loop, SET_SRC (set),
5365 GET_MODE (SET_SRC (set)),
5366 dest_reg, p, &inc_val, &mult_val,
5367 &location))
5369 /* It is a possible basic induction variable.
5370 Create and initialize an induction structure for it. */
5372 struct induction *v
5373 = (struct induction *) xmalloc (sizeof (struct induction));
5375 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5376 not_every_iteration, maybe_multiple);
5377 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5379 else if (REGNO (dest_reg) < ivs->n_regs)
5380 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5383 return p;
5386 /* Record all givs calculated in the insn.
5387 A register is a giv if: it is only set once, it is a function of a
5388 biv and a constant (or invariant), and it is not a biv. */
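/* For instance (names hypothetical), an insn setting R = I * 4 + B,
   where I is a biv and B is loop invariant, makes R a giv with
   mult_val 4 and add_val B.  */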
5389 static rtx
5390 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5391 struct loop *loop;
5392 rtx p;
5393 int not_every_iteration;
5394 int maybe_multiple;
5396 struct loop_regs *regs = LOOP_REGS (loop);
5398 rtx set;
5399 /* Look for a general induction variable in a register. */
5400 if (GET_CODE (p) == INSN
5401 && (set = single_set (p))
5402 && GET_CODE (SET_DEST (set)) == REG
5403 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5405 rtx src_reg;
5406 rtx dest_reg;
5407 rtx add_val;
5408 rtx mult_val;
5409 rtx ext_val;
5410 int benefit;
5411 rtx regnote = 0;
5412 rtx last_consec_insn;
5414 dest_reg = SET_DEST (set);
5415 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5416 return p;
5418 if (/* SET_SRC is a giv. */
5419 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5420 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5421 /* Equivalent expression is a giv. */
5422 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5423 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5424 &add_val, &mult_val, &ext_val, 0,
5425 &benefit, VOIDmode)))
5426 /* Don't try to handle any regs made by loop optimization.
5427 We have nothing on them in regno_first_uid, etc. */
5428 && REGNO (dest_reg) < max_reg_before_loop
5429 /* Don't recognize a BASIC_INDUCT_VAR here. */
5430 && dest_reg != src_reg
5431 /* This must be the only place where the register is set. */
5432 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5433 /* or all sets must be consecutive and make a giv. */
5434 || (benefit = consec_sets_giv (loop, benefit, p,
5435 src_reg, dest_reg,
5436 &add_val, &mult_val, &ext_val,
5437 &last_consec_insn))))
5439 struct induction *v
5440 = (struct induction *) xmalloc (sizeof (struct induction));
5442 /* If this is a library call, increase benefit. */
5443 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5444 benefit += libcall_benefit (p);
5446 /* Skip the consecutive insns, if there are any. */
5447 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5448 p = last_consec_insn;
5450 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5451 ext_val, benefit, DEST_REG, not_every_iteration,
5452 maybe_multiple, (rtx*) 0);
5457 #ifndef DONT_REDUCE_ADDR
5458 /* Look for givs which are memory addresses. */
5459 /* This resulted in worse code on a VAX 8600. I wonder if it
5460 still does. */
5461 if (GET_CODE (p) == INSN)
5462 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5463 maybe_multiple);
5464 #endif
5466 /* Update the status of whether giv can derive other givs. This can
5467 change when we pass a label or an insn that updates a biv. */
5468 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5469 || GET_CODE (p) == CODE_LABEL)
5470 update_giv_derive (loop, p);
5471 return p;
5474 /* Return 1 if X is a valid source for an initial value (or as value being
5475 compared against in an initial test).
5477 X must be either a register or constant and must not be clobbered between
5478 the current insn and the start of the loop.
5480 INSN is the insn containing X. */
5482 static int
5483 valid_initial_value_p (x, insn, call_seen, loop_start)
5484 rtx x;
5485 rtx insn;
5486 int call_seen;
5487 rtx loop_start;
5489 if (CONSTANT_P (x))
5490 return 1;
5492 /* Only consider pseudos we know about initialized in insns whose luids
5493 we know. */
5494 if (GET_CODE (x) != REG
5495 || REGNO (x) >= max_reg_before_loop)
5496 return 0;
5498 /* Don't use a call-clobbered register across a call which clobbers it. On
5499 some machines, don't use any hard registers at all. */
5500 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5501 && (SMALL_REGISTER_CLASSES
5502 || (call_used_regs[REGNO (x)] && call_seen)))
5503 return 0;
5505 /* Don't use registers that have been clobbered before the start of the
5506 loop. */
5507 if (reg_set_between_p (x, insn, loop_start))
5508 return 0;
5510 return 1;
5513 /* Scan X for memory refs and check each memory address
5514 as a possible giv. INSN is the insn whose pattern X comes from.
5515 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5516 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5517 more than once in each loop iteration. */
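/* E.g., scanning a pattern such as
(set (reg z) (mem:SI (plus (reg b) (const_int 8))))
recurses through the SET and examines the MEM's address
(plus (reg b) (const_int 8)) as a candidate DEST_ADDR giv. */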
5519 static void
5520 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5521 const struct loop *loop;
5522 rtx x;
5523 rtx insn;
5524 int not_every_iteration, maybe_multiple;
5526 int i, j;
5527 enum rtx_code code;
5528 const char *fmt;
5530 if (x == 0)
5531 return;
5533 code = GET_CODE (x);
5534 switch (code)
5536 case REG:
5537 case CONST_INT:
5538 case CONST:
5539 case CONST_DOUBLE:
5540 case SYMBOL_REF:
5541 case LABEL_REF:
5542 case PC:
5543 case CC0:
5544 case ADDR_VEC:
5545 case ADDR_DIFF_VEC:
5546 case USE:
5547 case CLOBBER:
5548 return;
5550 case MEM:
5552 rtx src_reg;
5553 rtx add_val;
5554 rtx mult_val;
5555 rtx ext_val;
5556 int benefit;
5558 /* This code used to disable creating GIVs with mult_val == 1 and
5559 add_val == 0. However, this leads to lost optimizations when
5560 it comes time to combine a set of related DEST_ADDR GIVs, since
5561 this one would not be seen. */
5563 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5564 &mult_val, &ext_val, 1, &benefit,
5565 GET_MODE (x)))
5567 /* Found one; record it. */
5568 struct induction *v
5569 = (struct induction *) xmalloc (sizeof (struct induction));
5571 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5572 add_val, ext_val, benefit, DEST_ADDR,
5573 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5575 v->mem = x;
5578 return;
5580 default:
5581 break;
5584 /* Recursively scan the subexpressions for other mem refs. */
5586 fmt = GET_RTX_FORMAT (code);
5587 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5588 if (fmt[i] == 'e')
5589 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5590 maybe_multiple);
5591 else if (fmt[i] == 'E')
5592 for (j = 0; j < XVECLEN (x, i); j++)
5593 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5594 maybe_multiple);
5597 /* Fill in the data about one biv update.
5598 V is the `struct induction' in which we record the biv. (It is
5599 allocated by the caller, with xmalloc.)
5600 INSN is the insn that sets it.
5601 DEST_REG is the biv's reg.
5603 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5604 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5605 being set to INC_VAL.
5607 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5608 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5609 can be executed more than once per iteration. If MAYBE_MULTIPLE
5610 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5611 executed exactly once per iteration. */
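/* As an illustration, a biv update such as
(set (reg i) (plus (reg i) (const_int 4)))
is recorded with MULT_VAL == const1_rtx and INC_VAL == (const_int 4),
while a set from a loop invariant, say
(set (reg i) (const_int 0)),
arrives with MULT_VAL == const0_rtx and INC_VAL == (const_int 0). */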
5613 static void
5614 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5615 not_every_iteration, maybe_multiple)
5616 struct loop *loop;
5617 struct induction *v;
5618 rtx insn;
5619 rtx dest_reg;
5620 rtx inc_val;
5621 rtx mult_val;
5622 rtx *location;
5623 int not_every_iteration;
5624 int maybe_multiple;
5626 struct loop_ivs *ivs = LOOP_IVS (loop);
5627 struct iv_class *bl;
5629 v->insn = insn;
5630 v->src_reg = dest_reg;
5631 v->dest_reg = dest_reg;
5632 v->mult_val = mult_val;
5633 v->add_val = inc_val;
5634 v->ext_dependent = NULL_RTX;
5635 v->location = location;
5636 v->mode = GET_MODE (dest_reg);
5637 v->always_computable = ! not_every_iteration;
5638 v->always_executed = ! not_every_iteration;
5639 v->maybe_multiple = maybe_multiple;
5641 /* Add this to the reg's iv_class, creating a class
5642 if this is the first incrementation of the reg. */
5644 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5645 if (bl == 0)
5647 /* Create and initialize new iv_class. */
5649 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5651 bl->regno = REGNO (dest_reg);
5652 bl->biv = 0;
5653 bl->giv = 0;
5654 bl->biv_count = 0;
5655 bl->giv_count = 0;
5657 /* Set initial value to the reg itself. */
5658 bl->initial_value = dest_reg;
5659 bl->final_value = 0;
5660 /* We haven't seen the initializing insn yet. */
5661 bl->init_insn = 0;
5662 bl->init_set = 0;
5663 bl->initial_test = 0;
5664 bl->incremented = 0;
5665 bl->eliminable = 0;
5666 bl->nonneg = 0;
5667 bl->reversed = 0;
5668 bl->total_benefit = 0;
5670 /* Add this class to ivs->list. */
5671 bl->next = ivs->list;
5672 ivs->list = bl;
5674 /* Put it in the array of biv register classes. */
5675 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5678 /* Update IV_CLASS entry for this biv. */
5679 v->next_iv = bl->biv;
5680 bl->biv = v;
5681 bl->biv_count++;
5682 if (mult_val == const1_rtx)
5683 bl->incremented = 1;
5685 if (loop_dump_stream)
5686 loop_biv_dump (v, loop_dump_stream, 0);
5689 /* Fill in the data about one giv.
5690 V is the `struct induction' in which we record the giv. (It is
5691 allocated by the caller, with xmalloc.)
5692 INSN is the insn that sets it.
5693 BENEFIT estimates the savings from deleting this insn.
5694 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5695 into a register or is used as a memory address.
5697 SRC_REG is the biv reg which the giv is computed from.
5698 DEST_REG is the giv's reg (if the giv is stored in a reg).
5699 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5700 LOCATION points to the place where this giv's value appears in INSN. */
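/* For instance, a giv computed into a register, as in
(set (reg g) (plus (mult (reg b) (const_int 4)) (reg inv))),
is recorded with TYPE == DEST_REG and DEST_REG == (reg g). A giv that
appears only as a memory address is recorded with TYPE == DEST_ADDR and
the dummy register addr_placeholder as its DEST_REG. */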
5702 static void
5703 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5704 benefit, type, not_every_iteration, maybe_multiple, location)
5705 const struct loop *loop;
5706 struct induction *v;
5707 rtx insn;
5708 rtx src_reg;
5709 rtx dest_reg;
5710 rtx mult_val, add_val, ext_val;
5711 int benefit;
5712 enum g_types type;
5713 int not_every_iteration, maybe_multiple;
5714 rtx *location;
5716 struct loop_ivs *ivs = LOOP_IVS (loop);
5717 struct induction *b;
5718 struct iv_class *bl;
5719 rtx set = single_set (insn);
5720 rtx temp;
5722 /* Attempt to prove the values constant. Don't let simplify_rtx
5723 undo the MULT canonicalization that we performed earlier. */
5724 temp = simplify_rtx (add_val);
5725 if (temp
5726 && ! (GET_CODE (add_val) == MULT
5727 && GET_CODE (temp) == ASHIFT))
5728 add_val = temp;
5730 v->insn = insn;
5731 v->src_reg = src_reg;
5732 v->giv_type = type;
5733 v->dest_reg = dest_reg;
5734 v->mult_val = mult_val;
5735 v->add_val = add_val;
5736 v->ext_dependent = ext_val;
5737 v->benefit = benefit;
5738 v->location = location;
5739 v->cant_derive = 0;
5740 v->combined_with = 0;
5741 v->maybe_multiple = maybe_multiple;
5742 v->maybe_dead = 0;
5743 v->derive_adjustment = 0;
5744 v->same = 0;
5745 v->ignore = 0;
5746 v->new_reg = 0;
5747 v->final_value = 0;
5748 v->same_insn = 0;
5749 v->auto_inc_opt = 0;
5750 v->unrolled = 0;
5751 v->shared = 0;
5753 /* The v->always_computable field is used in update_giv_derive, to
5754 determine whether a giv can be used to derive another giv. For a
5755 DEST_REG giv, INSN computes a new value for the giv, so its value
5756 isn't computable if INSN isn't executed every iteration.
5757 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5758 it does not compute a new value. Hence the value is always computable
5759 regardless of whether INSN is executed each iteration. */
5761 if (type == DEST_ADDR)
5762 v->always_computable = 1;
5763 else
5764 v->always_computable = ! not_every_iteration;
5766 v->always_executed = ! not_every_iteration;
5768 if (type == DEST_ADDR)
5770 v->mode = GET_MODE (*location);
5771 v->lifetime = 1;
5773 else /* type == DEST_REG */
5775 v->mode = GET_MODE (SET_DEST (set));
5777 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5779 /* If the lifetime is zero, it means that this register is
5780 really a dead store. So mark this as a giv that can be
5781 ignored. This will not prevent the biv from being eliminated. */
5782 if (v->lifetime == 0)
5783 v->ignore = 1;
5785 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5786 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5789 /* Add the giv to the class of givs computed from one biv. */
5791 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5792 if (bl)
5794 v->next_iv = bl->giv;
5795 bl->giv = v;
5796 /* Don't count DEST_ADDR. This is supposed to count the number of
5797 insns that calculate givs. */
5798 if (type == DEST_REG)
5799 bl->giv_count++;
5800 bl->total_benefit += benefit;
5802 else
5803 /* Fatal error, biv missing for this giv? */
5804 abort ();
5806 if (type == DEST_ADDR)
5807 v->replaceable = 1;
5808 else
5810 /* The giv can be replaced outright by the reduced register only if all
5811 of the following conditions are true:
5812 - the insn that sets the giv is always executed on any iteration
5813 on which the giv is used at all
5814 (there are two ways to deduce this:
5815 either the insn is executed on every iteration,
5816 or all uses follow that insn in the same basic block),
5817 - the giv is not used outside the loop
5818 - no assignments to the biv occur during the giv's lifetime. */
5820 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5821 /* Previous line always fails if INSN was moved by loop opt. */
5822 && REGNO_LAST_LUID (REGNO (dest_reg))
5823 < INSN_LUID (loop->end)
5824 && (! not_every_iteration
5825 || last_use_this_basic_block (dest_reg, insn)))
5827 /* Now check that there are no assignments to the biv within the
5828 giv's lifetime. This requires two separate checks. */
5830 /* Check each biv update, and fail if any are between the first
5831 and last use of the giv.
5833 If this loop contains an inner loop that was unrolled, then
5834 the insn modifying the biv may have been emitted by the loop
5835 unrolling code, and hence does not have a valid luid. Just
5836 mark the biv as not replaceable in this case. It is not very
5837 useful as a biv, because it is used in two different loops.
5838 It is very unlikely that we would be able to optimize the giv
5839 using this biv anyway. */
5841 v->replaceable = 1;
5842 for (b = bl->biv; b; b = b->next_iv)
5844 if (INSN_UID (b->insn) >= max_uid_for_loop
5845 || ((INSN_LUID (b->insn)
5846 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5847 && (INSN_LUID (b->insn)
5848 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5850 v->replaceable = 0;
5851 v->not_replaceable = 1;
5852 break;
5856 /* If there are any backwards branches that go from after the
5857 biv update to before it, then this giv is not replaceable. */
5858 if (v->replaceable)
5859 for (b = bl->biv; b; b = b->next_iv)
5860 if (back_branch_in_range_p (loop, b->insn))
5862 v->replaceable = 0;
5863 v->not_replaceable = 1;
5864 break;
5867 else
5869 /* May still be replaceable, we don't have enough info here to
5870 decide. */
5871 v->replaceable = 0;
5872 v->not_replaceable = 0;
5876 /* Record whether the add_val contains a const_int, for later use by
5877 combine_givs. */
5879 rtx tem = add_val;
5881 v->no_const_addval = 1;
5882 if (tem == const0_rtx)
5883 ;
5884 else if (CONSTANT_P (add_val))
5885 v->no_const_addval = 0;
5886 if (GET_CODE (tem) == PLUS)
5888 while (1)
5890 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5891 tem = XEXP (tem, 0);
5892 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5893 tem = XEXP (tem, 1);
5894 else
5895 break;
5897 if (CONSTANT_P (XEXP (tem, 1)))
5898 v->no_const_addval = 0;
5902 if (loop_dump_stream)
5903 loop_giv_dump (v, loop_dump_stream, 0);
5906 /* All this does is determine whether a giv can be made replaceable because
5907 its final value can be calculated. This code can not be part of record_giv
5908 above, because final_giv_value requires that the number of loop iterations
5909 be known, and that can not be accurately calculated until after all givs
5910 have been identified. */
5912 static void
5913 check_final_value (loop, v)
5914 const struct loop *loop;
5915 struct induction *v;
5917 struct loop_ivs *ivs = LOOP_IVS (loop);
5918 struct iv_class *bl;
5919 rtx final_value = 0;
5921 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5923 /* DEST_ADDR givs will never reach here, because they are always marked
5924 replaceable above in record_giv. */
5926 /* The giv can be replaced outright by the reduced register only if all
5927 of the following conditions are true:
5928 - the insn that sets the giv is always executed on any iteration
5929 on which the giv is used at all
5930 (there are two ways to deduce this:
5931 either the insn is executed on every iteration,
5932 or all uses follow that insn in the same basic block),
5933 - its final value can be calculated (this condition is different
5934 than the one above in record_giv)
5935 - it is not used before it is set
5936 - no assignments to the biv occur during the giv's lifetime. */
5938 #if 0
5939 /* This is only called now when replaceable is known to be false. */
5940 /* Clear replaceable, so that it won't confuse final_giv_value. */
5941 v->replaceable = 0;
5942 #endif
5944 if ((final_value = final_giv_value (loop, v))
5945 && (v->always_executed
5946 || last_use_this_basic_block (v->dest_reg, v->insn)))
5948 int biv_increment_seen = 0, before_giv_insn = 0;
5949 rtx p = v->insn;
5950 rtx last_giv_use;
5952 v->replaceable = 1;
5954 /* When trying to determine whether or not a biv increment occurs
5955 during the lifetime of the giv, we can ignore uses of the variable
5956 outside the loop because final_value is true. Hence we can not
5957 use regno_last_uid and regno_first_uid as above in record_giv. */
5959 /* Search the loop to determine whether any assignments to the
5960 biv occur during the giv's lifetime. Start with the insn
5961 that sets the giv, and search around the loop until we come
5962 back to that insn again.
5964 Also fail if there is a jump within the giv's lifetime that jumps
5965 to somewhere outside the lifetime but still within the loop. This
5966 catches spaghetti code where the execution order is not linear, and
5967 hence the above test fails. Here we assume that the giv lifetime
5968 does not extend from one iteration of the loop to the next, so as
5969 to make the test easier. Since the lifetime isn't known yet,
5970 this requires two loops. See also record_giv above. */
5972 last_giv_use = v->insn;
5974 while (1)
5976 p = NEXT_INSN (p);
5977 if (p == loop->end)
5979 before_giv_insn = 1;
5980 p = NEXT_INSN (loop->start);
5982 if (p == v->insn)
5983 break;
5985 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5986 || GET_CODE (p) == CALL_INSN)
5988 /* It is possible for the BIV increment to use the GIV if we
5989 have a cycle. Thus we must be sure to check each insn for
5990 both BIV and GIV uses, and we must check for BIV uses
5991 first. */
5993 if (! biv_increment_seen
5994 && reg_set_p (v->src_reg, PATTERN (p)))
5995 biv_increment_seen = 1;
5997 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5999 if (biv_increment_seen || before_giv_insn)
6001 v->replaceable = 0;
6002 v->not_replaceable = 1;
6003 break;
6005 last_giv_use = p;
6010 /* Now that the lifetime of the giv is known, check for branches
6011 from within the lifetime to outside the lifetime if it is still
6012 replaceable. */
6014 if (v->replaceable)
6016 p = v->insn;
6017 while (1)
6019 p = NEXT_INSN (p);
6020 if (p == loop->end)
6021 p = NEXT_INSN (loop->start);
6022 if (p == last_giv_use)
6023 break;
6025 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6026 && LABEL_NAME (JUMP_LABEL (p))
6027 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6028 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6029 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6030 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6032 v->replaceable = 0;
6033 v->not_replaceable = 1;
6035 if (loop_dump_stream)
6036 fprintf (loop_dump_stream,
6037 "Found branch outside giv lifetime.\n");
6039 break;
6044 /* If it is replaceable, then save the final value. */
6045 if (v->replaceable)
6046 v->final_value = final_value;
6049 if (loop_dump_stream && v->replaceable)
6050 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6051 INSN_UID (v->insn), REGNO (v->dest_reg));
6054 /* Update the status of whether a giv can derive other givs.
6056 We need to do something special if there is or may be an update to the biv
6057 between the time the giv is defined and the time it is used to derive
6058 another giv.
6060 In addition, a giv that is only conditionally set is not allowed to
6061 derive another giv once a label has been passed.
6063 The cases we look at are when a label or an update to a biv is passed. */
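/* For example, if a giv g = 4 * b is defined before the update b = b + 1
and used after it, any giv derived from g must be compensated by
biv->add_val * giv->mult_val, i.e. 4; that product is what
giv->derive_adjustment accumulates below. */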
6065 static void
6066 update_giv_derive (loop, p)
6067 const struct loop *loop;
6068 rtx p;
6070 struct loop_ivs *ivs = LOOP_IVS (loop);
6071 struct iv_class *bl;
6072 struct induction *biv, *giv;
6073 rtx tem;
6074 int dummy;
6076 /* Search all IV classes, then all bivs, and finally all givs.
6078 There are three cases we are concerned with. First we have the situation
6079 of a giv that is only updated conditionally. In that case, it may not
6080 derive any givs after a label is passed.
6082 The second case is when a biv update occurs, or may occur, after the
6083 definition of a giv. For certain biv updates (see below) that are
6084 known to occur between the giv definition and use, we can adjust the
6085 giv definition. For others, or when the biv update is conditional,
6086 we must prevent the giv from deriving any other givs. There are two
6087 sub-cases within this case.
6089 If this is a label, we are concerned with any biv update that is done
6090 conditionally, since it may be done after the giv is defined followed by
6091 a branch here (actually, we need to pass both a jump and a label, but
6092 this extra tracking doesn't seem worth it).
6094 If this is a jump, we are concerned about any biv update that may be
6095 executed multiple times. We are actually only concerned about
6096 backward jumps, but it is probably not worth performing the test
6097 on the jump again here.
6099 If this is a biv update, we must adjust the giv status to show that a
6100 subsequent biv update was performed. If this adjustment cannot be done,
6101 the giv cannot derive further givs. */
6103 for (bl = ivs->list; bl; bl = bl->next)
6104 for (biv = bl->biv; biv; biv = biv->next_iv)
6105 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6106 || biv->insn == p)
6108 for (giv = bl->giv; giv; giv = giv->next_iv)
6110 /* If cant_derive is already true, there is no point in
6111 checking all of these conditions again. */
6112 if (giv->cant_derive)
6113 continue;
6115 /* If this giv is conditionally set and we have passed a label,
6116 it cannot derive anything. */
6117 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6118 giv->cant_derive = 1;
6120 /* Skip givs that have mult_val == 0, since
6121 they are really invariants. Also skip those that are
6122 replaceable, since we know their lifetime doesn't contain
6123 any biv update. */
6124 else if (giv->mult_val == const0_rtx || giv->replaceable)
6125 continue;
6127 /* The only way we can allow this giv to derive another
6128 is if this is a biv increment and we can form the product
6129 of biv->add_val and giv->mult_val. In this case, we will
6130 be able to compute a compensation. */
6131 else if (biv->insn == p)
6133 rtx ext_val_dummy;
6135 tem = 0;
6136 if (biv->mult_val == const1_rtx)
6137 tem = simplify_giv_expr (loop,
6138 gen_rtx_MULT (giv->mode,
6139 biv->add_val,
6140 giv->mult_val),
6141 &ext_val_dummy, &dummy);
6143 if (tem && giv->derive_adjustment)
6144 tem = simplify_giv_expr
6145 (loop,
6146 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6147 &ext_val_dummy, &dummy);
6149 if (tem)
6150 giv->derive_adjustment = tem;
6151 else
6152 giv->cant_derive = 1;
6154 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6155 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6156 giv->cant_derive = 1;
6161 /* Check whether an insn is an increment legitimate for a basic induction var.
6162 X is the source of insn P, or a part of it.
6163 MODE is the mode in which X should be interpreted.
6165 DEST_REG is the putative biv, also the destination of the insn.
6166 We accept patterns of these forms:
6167 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6168 REG = INVARIANT + REG
6170 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6171 store the additive term into *INC_VAL, and store the place where
6172 we found the additive term into *LOCATION.
6174 If X is an assignment of an invariant into DEST_REG, we set
6175 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6177 We also want to detect a BIV when it corresponds to a variable
6178 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6179 of the variable may be a PLUS that adds a SUBREG of that variable to
6180 an invariant and then sign- or zero-extends the result of the PLUS
6181 into the variable.
6183 Most GIVs in such cases will be in the promoted mode, since that is
6184 probably the natural computation mode (and almost certainly the mode
6185 used for addresses) on the machine. So we view the pseudo-reg containing
6186 the variable as the BIV, as if it were simply incremented.
6188 Note that treating the entire pseudo as a BIV will result in making
6189 simple increments to any GIVs based on it. However, if the variable
6190 overflows in its declared mode but not its promoted mode, the result will
6191 be incorrect. This is acceptable if the variable is signed, since
6192 overflows in such cases are undefined, but not if it is unsigned, since
6193 those overflows are defined. So we only check for SIGN_EXTEND and
6194 not ZERO_EXTEND.
6196 If we cannot find a biv, we return 0. */
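/* In the simplest case, if DEST_REG is (reg 100) and X is
(plus (reg 100) (const_int -1)),
we return 1 with *MULT_VAL == const1_rtx, *INC_VAL == (const_int -1),
and *LOCATION pointing at the second operand of the PLUS. */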
6198 static int
6199 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6200 const struct loop *loop;
6201 rtx x;
6202 enum machine_mode mode;
6203 rtx dest_reg;
6204 rtx p;
6205 rtx *inc_val;
6206 rtx *mult_val;
6207 rtx **location;
6209 enum rtx_code code;
6210 rtx *argp, arg;
6211 rtx insn, set = 0;
6213 code = GET_CODE (x);
6214 *location = NULL;
6215 switch (code)
6217 case PLUS:
6218 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6219 || (GET_CODE (XEXP (x, 0)) == SUBREG
6220 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6221 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6223 argp = &XEXP (x, 1);
6225 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6226 || (GET_CODE (XEXP (x, 1)) == SUBREG
6227 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6228 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6230 argp = &XEXP (x, 0);
6232 else
6233 return 0;
6235 arg = *argp;
6236 if (loop_invariant_p (loop, arg) != 1)
6237 return 0;
6239 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6240 *mult_val = const1_rtx;
6241 *location = argp;
6242 return 1;
6244 case SUBREG:
6245 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6246 handle addition of promoted variables.
6247 ??? The comment at the start of this function is wrong: promoted
6248 variable increments don't look like it says they do. */
6249 return basic_induction_var (loop, SUBREG_REG (x),
6250 GET_MODE (SUBREG_REG (x)),
6251 dest_reg, p, inc_val, mult_val, location);
6253 case REG:
6254 /* If this register is assigned in a previous insn, look at its
6255 source, but don't go outside the loop or past a label. */
6257 /* If this sets a register to itself, we would repeat any previous
6258 biv increment if we applied this strategy blindly. */
6259 if (rtx_equal_p (dest_reg, x))
6260 return 0;
6262 insn = p;
6263 while (1)
6265 rtx dest;
6268 insn = PREV_INSN (insn);
6270 while (insn && GET_CODE (insn) == NOTE
6271 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6273 if (!insn)
6274 break;
6275 set = single_set (insn);
6276 if (set == 0)
6277 break;
6278 dest = SET_DEST (set);
6279 if (dest == x
6280 || (GET_CODE (dest) == SUBREG
6281 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6282 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6283 && SUBREG_REG (dest) == x))
6284 return basic_induction_var (loop, SET_SRC (set),
6285 (GET_MODE (SET_SRC (set)) == VOIDmode
6286 ? GET_MODE (x)
6287 : GET_MODE (SET_SRC (set))),
6288 dest_reg, insn,
6289 inc_val, mult_val, location);
6291 while (GET_CODE (dest) == SIGN_EXTRACT
6292 || GET_CODE (dest) == ZERO_EXTRACT
6293 || GET_CODE (dest) == SUBREG
6294 || GET_CODE (dest) == STRICT_LOW_PART)
6295 dest = XEXP (dest, 0);
6296 if (dest == x)
6297 break;
6299 /* Fall through. */
6301 /* Can accept constant setting of biv only when inside the innermost loop.
6302 Otherwise, a biv of an inner loop may be incorrectly recognized
6303 as a biv of the outer loop,
6304 causing code to be moved INTO the inner loop. */
6305 case MEM:
6306 if (loop_invariant_p (loop, x) != 1)
6307 return 0;
6308 case CONST_INT:
6309 case SYMBOL_REF:
6310 case CONST:
6311 /* convert_modes aborts if we try to convert to or from CCmode, so just
6312 exclude that case. It is very unlikely that a condition code value
6313 would be a useful iterator anyway. convert_modes aborts if we try to
6314 convert a float mode to non-float or vice versa too. */
6315 if (loop->level == 1
6316 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6317 && GET_MODE_CLASS (mode) != MODE_CC)
6319 /* Possible bug here? Perhaps we don't know the mode of X. */
6320 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6321 *mult_val = const0_rtx;
6322 return 1;
6324 else
6325 return 0;
6327 case SIGN_EXTEND:
6328 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6329 dest_reg, p, inc_val, mult_val, location);
6331 case ASHIFTRT:
6332 /* Similar, since this can be a sign extension. */
6333 for (insn = PREV_INSN (p);
6334 (insn && GET_CODE (insn) == NOTE
6335 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6336 insn = PREV_INSN (insn))
6339 if (insn)
6340 set = single_set (insn);
6342 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6343 && set && SET_DEST (set) == XEXP (x, 0)
6344 && GET_CODE (XEXP (x, 1)) == CONST_INT
6345 && INTVAL (XEXP (x, 1)) >= 0
6346 && GET_CODE (SET_SRC (set)) == ASHIFT
6347 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6348 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6349 GET_MODE (XEXP (x, 0)),
6350 dest_reg, insn, inc_val, mult_val,
6351 location);
6352 return 0;
6354 default:
6355 return 0;
6359 /* A general induction variable (giv) is any quantity that is a linear
6360 function of a basic induction variable,
6361 i.e. giv = biv * mult_val + add_val.
6362 The coefficients can be any loop invariant quantity.
6363 A giv need not be computed directly from the biv;
6364 it can be computed by way of other givs. */
6366 /* Determine whether X computes a giv.
6367 If it does, return a nonzero value
6368 which is the benefit from eliminating the computation of X;
6369 set *SRC_REG to the register of the biv that it is computed from;
6370 set *ADD_VAL and *MULT_VAL to the coefficients,
6371 such that the value of X is biv * mult + add; */
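/* To illustrate, if (reg b) is a known biv and (reg inv) is loop
invariant, then for
X = (plus (mult (reg b) (const_int 4)) (reg inv))
we set *SRC_REG = (reg b), *MULT_VAL = (const_int 4) and
*ADD_VAL = (reg inv). */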
6373 static int
6374 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6375 is_addr, pbenefit, addr_mode)
6376 const struct loop *loop;
6377 rtx x;
6378 rtx *src_reg;
6379 rtx *add_val;
6380 rtx *mult_val;
6381 rtx *ext_val;
6382 int is_addr;
6383 int *pbenefit;
6384 enum machine_mode addr_mode;
6386 struct loop_ivs *ivs = LOOP_IVS (loop);
6387 rtx orig_x = x;
6389 /* If this is an invariant, forget it, it isn't a giv. */
6390 if (loop_invariant_p (loop, x) == 1)
6391 return 0;
6393 *pbenefit = 0;
6394 *ext_val = NULL_RTX;
6395 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6396 if (x == 0)
6397 return 0;
6399 switch (GET_CODE (x))
6401 case USE:
6402 case CONST_INT:
6403 /* Since this is now an invariant and wasn't before, it must be a giv
6404 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6405 with. */
6406 *src_reg = ivs->list->biv->dest_reg;
6407 *mult_val = const0_rtx;
6408 *add_val = x;
6409 break;
6411 case REG:
6412 /* This is equivalent to a BIV. */
6413 *src_reg = x;
6414 *mult_val = const1_rtx;
6415 *add_val = const0_rtx;
6416 break;
6418 case PLUS:
6419 /* Either (plus (biv) (invar)) or
6420 (plus (mult (biv) (invar_1)) (invar_2)). */
6421 if (GET_CODE (XEXP (x, 0)) == MULT)
6423 *src_reg = XEXP (XEXP (x, 0), 0);
6424 *mult_val = XEXP (XEXP (x, 0), 1);
6426 else
6428 *src_reg = XEXP (x, 0);
6429 *mult_val = const1_rtx;
6431 *add_val = XEXP (x, 1);
6432 break;
6434 case MULT:
6435 /* ADD_VAL is zero. */
6436 *src_reg = XEXP (x, 0);
6437 *mult_val = XEXP (x, 1);
6438 *add_val = const0_rtx;
6439 break;
6441 default:
6442 abort ();
6445 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6446 unless they are CONST_INT). */
6447 if (GET_CODE (*add_val) == USE)
6448 *add_val = XEXP (*add_val, 0);
6449 if (GET_CODE (*mult_val) == USE)
6450 *mult_val = XEXP (*mult_val, 0);
6452 if (is_addr)
6453 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6454 else
6455 *pbenefit += rtx_cost (orig_x, SET);
6457 /* Always return true if this is a giv so it will be detected as such,
6458 even if the benefit is zero or negative. This allows elimination
6459 of bivs that might otherwise not be eliminated. */
6460 return 1;
6463 /* Given an expression, X, try to form it as a linear function of a biv.
6464 We will canonicalize it to be of the form
6465 (plus (mult (BIV) (invar_1))
6466 (invar_2))
6467 with possible degeneracies.
6469 The invariant expressions must each be of a form that can be used as a
6470 machine operand. We surround them with a USE rtx (a hack, but localized
6471 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6472 routine; it is the caller's responsibility to strip them.
6474 If no such canonicalization is possible (i.e., two biv's are used or an
6475 expression that is neither invariant nor a biv or giv), this routine
6476 returns 0.
6478 For a non-zero return, the result will have a code of CONST_INT, USE,
6479 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6481 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
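/* A worked example: if (reg i) is a biv, then
(mult (plus (reg i) (const_int 1)) (const_int 4))
distributes and comes back canonicalized as
(plus (mult (reg i) (const_int 4)) (const_int 4)). */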
6483 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6484 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6486 static rtx
6487 simplify_giv_expr (loop, x, ext_val, benefit)
6488 const struct loop *loop;
6489 rtx x;
6490 rtx *ext_val;
6491 int *benefit;
6493 struct loop_ivs *ivs = LOOP_IVS (loop);
6494 struct loop_regs *regs = LOOP_REGS (loop);
6495 enum machine_mode mode = GET_MODE (x);
6496 rtx arg0, arg1;
6497 rtx tem;
6499 /* If this is not an integer mode, or if we cannot do arithmetic in this
6500 mode, this can't be a giv. */
6501 if (mode != VOIDmode
6502 && (GET_MODE_CLASS (mode) != MODE_INT
6503 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6504 return NULL_RTX;
6506 switch (GET_CODE (x))
6508 case PLUS:
6509 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6510 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6511 if (arg0 == 0 || arg1 == 0)
6512 return NULL_RTX;
6514 /* Put constant last, CONST_INT last if both constant. */
6515 if ((GET_CODE (arg0) == USE
6516 || GET_CODE (arg0) == CONST_INT)
6517 && ! ((GET_CODE (arg0) == USE
6518 && GET_CODE (arg1) == USE)
6519 || GET_CODE (arg1) == CONST_INT))
6520 tem = arg0, arg0 = arg1, arg1 = tem;
6522 /* Handle addition of zero, then addition of an invariant. */
6523 if (arg1 == const0_rtx)
6524 return arg0;
6525 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6526 switch (GET_CODE (arg0))
6528 case CONST_INT:
6529 case USE:
6530 /* Adding two invariants must result in an invariant, so enclose
6531 addition operation inside a USE and return it. */
6532 if (GET_CODE (arg0) == USE)
6533 arg0 = XEXP (arg0, 0);
6534 if (GET_CODE (arg1) == USE)
6535 arg1 = XEXP (arg1, 0);
6537 if (GET_CODE (arg0) == CONST_INT)
6538 tem = arg0, arg0 = arg1, arg1 = tem;
6539 if (GET_CODE (arg1) == CONST_INT)
6540 tem = sge_plus_constant (arg0, arg1);
6541 else
6542 tem = sge_plus (mode, arg0, arg1);
6544 if (GET_CODE (tem) != CONST_INT)
6545 tem = gen_rtx_USE (mode, tem);
6546 return tem;
6548 case REG:
6549 case MULT:
6550 /* biv + invar or mult + invar. Return sum. */
6551 return gen_rtx_PLUS (mode, arg0, arg1);
6553 case PLUS:
6554 /* (a + invar_1) + invar_2. Associate. */
6555 return
6556 simplify_giv_expr (loop,
6557 gen_rtx_PLUS (mode,
6558 XEXP (arg0, 0),
6559 gen_rtx_PLUS (mode,
6560 XEXP (arg0, 1),
6561 arg1)),
6562 ext_val, benefit);
6564 default:
6565 abort ();
6568 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6569 MULT to reduce cases. */
6570 if (GET_CODE (arg0) == REG)
6571 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6572 if (GET_CODE (arg1) == REG)
6573 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6575 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6576 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6577 Recurse to associate the second PLUS. */
6578 if (GET_CODE (arg1) == MULT)
6579 tem = arg0, arg0 = arg1, arg1 = tem;
6581 if (GET_CODE (arg1) == PLUS)
6582 return
6583 simplify_giv_expr (loop,
6584 gen_rtx_PLUS (mode,
6585 gen_rtx_PLUS (mode, arg0,
6586 XEXP (arg1, 0)),
6587 XEXP (arg1, 1)),
6588 ext_val, benefit);
6590 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6591 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6592 return NULL_RTX;
6594 if (!rtx_equal_p (arg0, arg1))
6595 return NULL_RTX;
6597 return simplify_giv_expr (loop,
6598 gen_rtx_MULT (mode,
6599 XEXP (arg0, 0),
6600 gen_rtx_PLUS (mode,
6601 XEXP (arg0, 1),
6602 XEXP (arg1, 1))),
6603 ext_val, benefit);
6605 case MINUS:
6606 /* Handle "a - b" as "a + b * (-1)". */
6607 return simplify_giv_expr (loop,
6608 gen_rtx_PLUS (mode,
6609 XEXP (x, 0),
6610 gen_rtx_MULT (mode,
6611 XEXP (x, 1),
6612 constm1_rtx)),
6613 ext_val, benefit);
6615 case MULT:
6616 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6617 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6618 if (arg0 == 0 || arg1 == 0)
6619 return NULL_RTX;
6621 /* Put constant last, CONST_INT last if both constant. */
6622 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6623 && GET_CODE (arg1) != CONST_INT)
6624 tem = arg0, arg0 = arg1, arg1 = tem;
6626 /* If second argument is not now constant, not giv. */
6627 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6628 return NULL_RTX;
6630 /* Handle multiply by 0 or 1. */
6631 if (arg1 == const0_rtx)
6632 return const0_rtx;
6634 else if (arg1 == const1_rtx)
6635 return arg0;
6637 switch (GET_CODE (arg0))
6639 case REG:
6640 /* biv * invar. Done. */
6641 return gen_rtx_MULT (mode, arg0, arg1);
6643 case CONST_INT:
6644 /* Product of two constants. */
6645 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6647 case USE:
6648 /* invar * invar is a giv, but attempt to simplify it somehow. */
6649 if (GET_CODE (arg1) != CONST_INT)
6650 return NULL_RTX;
6652 arg0 = XEXP (arg0, 0);
6653 if (GET_CODE (arg0) == MULT)
6655 /* (invar_0 * invar_1) * invar_2. Associate. */
6656 return simplify_giv_expr (loop,
6657 gen_rtx_MULT (mode,
6658 XEXP (arg0, 0),
6659 gen_rtx_MULT (mode,
6660 XEXP (arg0, 1),
6662 arg1)),
6663 ext_val, benefit);
6665 /* Propagate the MULT expressions to the innermost nodes. */
6666 else if (GET_CODE (arg0) == PLUS)
6668 /* (invar_0 + invar_1) * invar_2. Distribute. */
6669 return simplify_giv_expr (loop,
6670 gen_rtx_PLUS (mode,
6671 gen_rtx_MULT (mode,
6672 XEXP (arg0, 0),
6674 arg1),
6675 gen_rtx_MULT (mode,
6676 XEXP (arg0, 1),
6678 arg1)),
6679 ext_val, benefit);
6681 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6683 case MULT:
6684 /* (a * invar_1) * invar_2. Associate. */
6685 return simplify_giv_expr (loop,
6686 gen_rtx_MULT (mode,
6687 XEXP (arg0, 0),
6688 gen_rtx_MULT (mode,
6689 XEXP (arg0, 1),
6690 arg1)),
6691 ext_val, benefit);
6693 case PLUS:
6694 /* (a + invar_1) * invar_2. Distribute. */
6695 return simplify_giv_expr (loop,
6696 gen_rtx_PLUS (mode,
6697 gen_rtx_MULT (mode,
6698 XEXP (arg0, 0),
6699 arg1),
6700 gen_rtx_MULT (mode,
6701 XEXP (arg0, 1),
6702 arg1)),
6703 ext_val, benefit);
6705 default:
6706 abort ();
6709 case ASHIFT:
6710 /* Shift by constant is multiply by power of two. */
6711 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6712 return 0;
6714 return
6715 simplify_giv_expr (loop,
6716 gen_rtx_MULT (mode,
6717 XEXP (x, 0),
6718 GEN_INT ((HOST_WIDE_INT) 1
6719 << INTVAL (XEXP (x, 1)))),
6720 ext_val, benefit);
6722 case NEG:
6723 /* "-a" is "a * (-1)" */
6724 return simplify_giv_expr (loop,
6725 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6726 ext_val, benefit);
6728 case NOT:
6729 /* "~a" is "-a - 1". Silly, but easy. */
6730 return simplify_giv_expr (loop,
6731 gen_rtx_MINUS (mode,
6732 gen_rtx_NEG (mode, XEXP (x, 0)),
6733 const1_rtx),
6734 ext_val, benefit);
6736 case USE:
6737 /* Already in proper form for invariant. */
6738 return x;
6740 case SIGN_EXTEND:
6741 case ZERO_EXTEND:
6742 case TRUNCATE:
6743 /* Conditionally recognize extensions of simple IVs. After we've
6744 computed loop traversal counts and verified the range of the
6745 source IV, we'll reevaluate this as a GIV. */
6746 if (*ext_val == NULL_RTX)
6748 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6749 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6751 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6752 return arg0;
6755 goto do_default;
6757 case REG:
6758 /* If this is a new register, we can't deal with it. */
6759 if (REGNO (x) >= max_reg_before_loop)
6760 return 0;
6762 /* Check for biv or giv. */
6763 switch (REG_IV_TYPE (ivs, REGNO (x)))
6765 case BASIC_INDUCT:
6766 return x;
6767 case GENERAL_INDUCT:
6769 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6771 /* Form expression from giv and add benefit. Ensure this giv
6772 can derive another and subtract any needed adjustment if so. */
6774 /* Increasing the benefit here is risky. The only case in which it
6775 is arguably correct is if this is the only use of V. In other
6776 cases, this will artificially inflate the benefit of the current
6777 giv, and lead to suboptimal code. Thus, it is disabled, since
6778 potentially not reducing an only marginally beneficial giv is
6779 less harmful than reducing many givs that are not really
6780 beneficial. */
6782 rtx single_use = regs->array[REGNO (x)].single_usage;
6783 if (single_use && single_use != const0_rtx)
6784 *benefit += v->benefit;
6787 if (v->cant_derive)
6788 return 0;
6790 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6791 v->src_reg, v->mult_val),
6792 v->add_val);
6794 if (v->derive_adjustment)
6795 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6796 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6797 if (*ext_val)
6799 if (!v->ext_dependent)
6800 return arg0;
6802 else
6804 *ext_val = v->ext_dependent;
6805 return arg0;
6807 return 0;
6810 default:
6811 do_default:
6812 /* If it isn't an induction variable, and it is invariant, we
6813 may be able to simplify things further by looking through
6814 the bits we just moved outside the loop. */
6815 if (loop_invariant_p (loop, x) == 1)
6817 struct movable *m;
6818 struct loop_movables *movables = LOOP_MOVABLES (loop);
6820 for (m = movables->head; m; m = m->next)
6821 if (rtx_equal_p (x, m->set_dest))
6823 /* Ok, we found a match. Substitute and simplify. */
6825 /* If we match another movable, we must use that, as
6826 this one is going away. */
6827 if (m->match)
6828 return simplify_giv_expr (loop, m->match->set_dest,
6829 ext_val, benefit);
6831 /* If consec is non-zero, this is a member of a group of
6832 instructions that were moved together. We handle this
6833 case only to the point of seeking to the last insn and
6834 looking for a REG_EQUAL. Fail if we don't find one. */
6835 if (m->consec != 0)
6837 int i = m->consec;
6838 tem = m->insn;
6839 do
6841 tem = NEXT_INSN (tem);
6843 while (--i > 0);
6845 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6846 if (tem)
6847 tem = XEXP (tem, 0);
6849 else
6851 tem = single_set (m->insn);
6852 if (tem)
6853 tem = SET_SRC (tem);
6856 if (tem)
6858 /* What we are most interested in is pointer
6859 arithmetic on invariants -- only take
6860 patterns we may be able to do something with. */
6861 if (GET_CODE (tem) == PLUS
6862 || GET_CODE (tem) == MULT
6863 || GET_CODE (tem) == ASHIFT
6864 || GET_CODE (tem) == CONST_INT
6865 || GET_CODE (tem) == SYMBOL_REF)
6867 tem = simplify_giv_expr (loop, tem, ext_val,
6868 benefit);
6869 if (tem)
6870 return tem;
6872 else if (GET_CODE (tem) == CONST
6873 && GET_CODE (XEXP (tem, 0)) == PLUS
6874 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6875 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6877 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6878 ext_val, benefit);
6879 if (tem)
6880 return tem;
6883 break;
6886 break;
6889 /* Fall through to general case. */
6890 default:
6891 /* If invariant, return as USE (unless CONST_INT).
6892 Otherwise, not giv. */
6893 if (GET_CODE (x) == USE)
6894 x = XEXP (x, 0);
6896 if (loop_invariant_p (loop, x) == 1)
6898 if (GET_CODE (x) == CONST_INT)
6899 return x;
6900 if (GET_CODE (x) == CONST
6901 && GET_CODE (XEXP (x, 0)) == PLUS
6902 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6903 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6904 x = XEXP (x, 0);
6905 return gen_rtx_USE (mode, x);
6907 else
6908 return 0;
6912 /* This routine folds invariants such that there is only ever one
6913 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6915 static rtx
6916 sge_plus_constant (x, c)
6917 rtx x, c;
6919 if (GET_CODE (x) == CONST_INT)
6920 return GEN_INT (INTVAL (x) + INTVAL (c));
6921 else if (GET_CODE (x) != PLUS)
6922 return gen_rtx_PLUS (GET_MODE (x), x, c);
6923 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6925 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6926 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6928 else if (GET_CODE (XEXP (x, 0)) == PLUS
6929 || GET_CODE (XEXP (x, 1)) != PLUS)
6931 return gen_rtx_PLUS (GET_MODE (x),
6932 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6934 else
6936 return gen_rtx_PLUS (GET_MODE (x),
6937 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6941 static rtx
6942 sge_plus (mode, x, y)
6943 enum machine_mode mode;
6944 rtx x, y;
6946 while (GET_CODE (y) == PLUS)
6948 rtx a = XEXP (y, 0);
6949 if (GET_CODE (a) == CONST_INT)
6950 x = sge_plus_constant (x, a);
6951 else
6952 x = gen_rtx_PLUS (mode, x, a);
6953 y = XEXP (y, 1);
6955 if (GET_CODE (y) == CONST_INT)
6956 x = sge_plus_constant (x, y);
6957 else
6958 x = gen_rtx_PLUS (mode, x, y);
6959 return x;
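/* For instance, folding the invariant sum ((sym + 3) + 5), where `sym'
stands for, say, a SYMBOL_REF, yields (plus sym (const_int 8)), so at
most a single CONST_INT survives in the summation. */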
6962 /* Help detect a giv that is calculated by several consecutive insns;
6963 for example,
6964 giv = biv * M
6965 giv = giv + A
6966 The caller has already identified the first insn P as having a giv as dest;
6967 we check that all other insns that set the same register follow
6968 immediately after P, that they alter nothing else,
6969 and that the result of the last is still a giv.
6971 The value is 0 if the reg set in P is not really a giv.
6972 Otherwise, the value is the amount gained by eliminating
6973 all the consecutive insns that compute the value.
6975 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6976 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6978 The coefficients of the ultimate giv value are stored in
6979 *MULT_VAL and *ADD_VAL. */
6981 static int
6982 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6983 add_val, mult_val, ext_val, last_consec_insn)
6984 const struct loop *loop;
6985 int first_benefit;
6986 rtx p;
6987 rtx src_reg;
6988 rtx dest_reg;
6989 rtx *add_val;
6990 rtx *mult_val;
6991 rtx *ext_val;
6992 rtx *last_consec_insn;
6994 struct loop_ivs *ivs = LOOP_IVS (loop);
6995 struct loop_regs *regs = LOOP_REGS (loop);
6996 int count;
6997 enum rtx_code code;
6998 int benefit;
6999 rtx temp;
7000 rtx set;
7002 /* Indicate that this is a giv so that we can update the value produced in
7003 each insn of the multi-insn sequence.
7005 This induction structure will be used only by the call to
7006 general_induction_var below, so we can allocate it on our stack.
7007 If this is a giv, our caller will replace the induct var entry with
7008 a new induction structure. */
7009 struct induction *v;
7011 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7012 return 0;
7014 v = (struct induction *) alloca (sizeof (struct induction));
7015 v->src_reg = src_reg;
7016 v->mult_val = *mult_val;
7017 v->add_val = *add_val;
7018 v->benefit = first_benefit;
7019 v->cant_derive = 0;
7020 v->derive_adjustment = 0;
7021 v->ext_dependent = NULL_RTX;
7023 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7024 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7026 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7028 while (count > 0)
7030 p = NEXT_INSN (p);
7031 code = GET_CODE (p);
7033 /* If libcall, skip to end of call sequence. */
7034 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7035 p = XEXP (temp, 0);
7037 if (code == INSN
7038 && (set = single_set (p))
7039 && GET_CODE (SET_DEST (set)) == REG
7040 && SET_DEST (set) == dest_reg
7041 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7042 add_val, mult_val, ext_val, 0,
7043 &benefit, VOIDmode)
7044 /* Giv created by equivalent expression. */
7045 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7046 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7047 add_val, mult_val, ext_val, 0,
7048 &benefit, VOIDmode)))
7049 && src_reg == v->src_reg)
7051 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7052 benefit += libcall_benefit (p);
7054 count--;
7055 v->mult_val = *mult_val;
7056 v->add_val = *add_val;
7057 v->benefit += benefit;
7059 else if (code != NOTE)
7061 /* Allow insns that set something other than this giv to a
7062 constant. Such insns are needed on machines which cannot
7063 include long constants and should not disqualify a giv. */
7064 if (code == INSN
7065 && (set = single_set (p))
7066 && SET_DEST (set) != dest_reg
7067 && CONSTANT_P (SET_SRC (set)))
7068 continue;
7070 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7071 return 0;
7075 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7076 *last_consec_insn = p;
7077 return v->benefit;
7080 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7081 represented by G1. If no such expression can be found, or it is clear that
7082 it cannot possibly be a valid address, 0 is returned.
7084 To perform the computation, we note that
7085 G1 = x * v + a and
7086 G2 = y * v + b
7087 where `v' is the biv.
7089 So G2 = (y/x) * G1 + (b - a*y/x).
7091 Note that MULT = y/x.
7093 Update: A and B are now allowed to be additive expressions such that
7094 B contains all variables in A. That is, computing B-A will not require
7095 subtracting variables. */
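/* A small numeric example: with G1 = 2*v + 1 and G2 = 6*v + 7, we have
MULT = y/x = 3 and G2 = 3 * G1 + 4, since b - a*y/x = 7 - 3 = 4. */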
7097 static rtx
7098 express_from_1 (a, b, mult)
7099 rtx a, b, mult;
7101 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7103 if (mult == const0_rtx)
7104 return b;
7106 /* If MULT is not 1, we cannot handle A with non-constants, since we
7107 would then be required to subtract multiples of the registers in A.
7108 This is theoretically possible, and may even apply to some Fortran
7109 constructs, but it is a lot of work and we do not attempt it here. */
7111 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7112 return NULL_RTX;
7114 /* In general these structures are sorted top to bottom (down the PLUS
7115 chain), but not left to right across the PLUS. If B is a higher
7116 order giv than A, we can strip one level and recurse. If A is higher
7117 order, we'll eventually bail out, but won't know that until the end.
7118 If they are the same, we'll strip one level around this loop. */
7120 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7122 rtx ra, rb, oa, ob, tmp;
7124 ra = XEXP (a, 0), oa = XEXP (a, 1);
7125 if (GET_CODE (ra) == PLUS)
7126 tmp = ra, ra = oa, oa = tmp;
7128 rb = XEXP (b, 0), ob = XEXP (b, 1);
7129 if (GET_CODE (rb) == PLUS)
7130 tmp = rb, rb = ob, ob = tmp;
7132 if (rtx_equal_p (ra, rb))
7133 /* We matched: remove one reg completely. */
7134 a = oa, b = ob;
7135 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7136 /* An alternate match. */
7137 a = oa, b = rb;
7138 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7139 /* An alternate match. */
7140 a = ra, b = ob;
7141 else
7143 /* Indicates an extra register in B. Strip one level from B and
7144 recurse, hoping B was the higher order expression. */
7145 ob = express_from_1 (a, ob, mult);
7146 if (ob == NULL_RTX)
7147 return NULL_RTX;
7148 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7152 /* Here we are at the last level of A, go through the cases hoping to
7153 get rid of everything but a constant. */
7155 if (GET_CODE (a) == PLUS)
7157 rtx ra, oa;
7159 ra = XEXP (a, 0), oa = XEXP (a, 1);
7160 if (rtx_equal_p (oa, b))
7161 oa = ra;
7162 else if (!rtx_equal_p (ra, b))
7163 return NULL_RTX;
7165 if (GET_CODE (oa) != CONST_INT)
7166 return NULL_RTX;
7168 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7170 else if (GET_CODE (a) == CONST_INT)
7172 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7174 else if (CONSTANT_P (a))
7176 enum machine_mode mode_a = GET_MODE (a);
7177 enum machine_mode mode_b = GET_MODE (b);
7178 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7179 return simplify_gen_binary (MINUS, mode, b, a);
7181 else if (GET_CODE (b) == PLUS)
7183 if (rtx_equal_p (a, XEXP (b, 0)))
7184 return XEXP (b, 1);
7185 else if (rtx_equal_p (a, XEXP (b, 1)))
7186 return XEXP (b, 0);
7187 else
7188 return NULL_RTX;
7190 else if (rtx_equal_p (a, b))
7191 return const0_rtx;
7193 return NULL_RTX;
7196 rtx
7197 express_from (g1, g2)
7198 struct induction *g1, *g2;
7200 rtx mult, add;
7202 /* The value that G1 will be multiplied by must be a constant integer. Also,
7203 the only chance we have of getting a valid address is if b*c/a (see above
7204 for notation) is also an integer. */
7205 if (GET_CODE (g1->mult_val) == CONST_INT
7206 && GET_CODE (g2->mult_val) == CONST_INT)
7208 if (g1->mult_val == const0_rtx
7209 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7210 return NULL_RTX;
7211 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7213 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7214 mult = const1_rtx;
7215 else
7217 /* ??? Find out if one is a multiple of the other? */
7218 return NULL_RTX;
7221 add = express_from_1 (g1->add_val, g2->add_val, mult);
7222 if (add == NULL_RTX)
7224 /* Failed. If we've got a multiplication factor between G1 and G2,
7225 scale G1's addend and try again. */
7226 if (INTVAL (mult) > 1)
7228 rtx g1_add_val = g1->add_val;
7229 if (GET_CODE (g1_add_val) == MULT
7230 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7232 HOST_WIDE_INT m;
7233 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7234 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7235 XEXP (g1_add_val, 0), GEN_INT (m));
7237 else
7239 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7240 mult);
7243 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7246 if (add == NULL_RTX)
7247 return NULL_RTX;
7249 /* Form simplified final result. */
7250 if (mult == const0_rtx)
7251 return add;
7252 else if (mult == const1_rtx)
7253 mult = g1->dest_reg;
7254 else
7255 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7257 if (add == const0_rtx)
7258 return mult;
7259 else
7261 if (GET_CODE (add) == PLUS
7262 && CONSTANT_P (XEXP (add, 1)))
7264 rtx tem = XEXP (add, 1);
7265 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7266 add = tem;
7269 return gen_rtx_PLUS (g2->mode, mult, add);
7273 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7274 represented by G1. This indicates that G2 should be combined with G1 and
7275 that G2 can use (either directly or via an address expression) a register
7276 used to represent G1. */
7278 static rtx
7279 combine_givs_p (g1, g2)
7280 struct induction *g1, *g2;
7282 rtx comb, ret;
7284 /* With the introduction of ext dependent givs, we must be careful about
7285 modes. G2 must not use a wider mode than G1. */
7286 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7287 return NULL_RTX;
7289 ret = comb = express_from (g1, g2);
7290 if (comb == NULL_RTX)
7291 return NULL_RTX;
7292 if (g1->mode != g2->mode)
7293 ret = gen_lowpart (g2->mode, comb);
7295 /* If these givs are identical, they can be combined. We use the results
7296 of express_from because the addends are not in a canonical form, so
7297 rtx_equal_p is a weaker test. */
7298 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7299 combination to be the other way round. */
7300 if (comb == g1->dest_reg
7301 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7303 return ret;
7306 /* If G2 can be expressed as a function of G1 and that function is valid
7307 as an address and no more expensive than using a register for G2,
7308 the expression of G2 in terms of G1 can be used. */
7309 if (ret != NULL_RTX
7310 && g2->giv_type == DEST_ADDR
7311 && memory_address_p (GET_MODE (g2->mem), ret)
7312 /* ??? Loses, especially with -fforce-addr, where *g2->location
7313 will always be a register, and so anything more complicated
7314 gets discarded. */
7315 #if 0
7316 #ifdef ADDRESS_COST
7317 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7318 #else
7319 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7320 #endif
7321 #endif
7324 return ret;
7327 return NULL_RTX;
7330 /* Check each extension dependent giv in this class to see if its
7331 root biv is safe from wrapping in the interior mode, which would
7332 make the giv illegal. */
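/* Suppose, for example, an SImode biv starts at 10 and is decremented by
1 over 20 iterations. It crosses zero, so a zero extension of it is not
a linear function of the iteration count and any dependent giv must be
invalidated; a sign extension remains safe, since the signed value
never wraps. */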
7334 static void
7335 check_ext_dependent_givs (bl, loop_info)
7336 struct iv_class *bl;
7337 struct loop_info *loop_info;
7339 int ze_ok = 0, se_ok = 0, info_ok = 0;
7340 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7341 HOST_WIDE_INT start_val;
7342 unsigned HOST_WIDE_INT u_end_val = 0;
7343 unsigned HOST_WIDE_INT u_start_val = 0;
7344 rtx incr = pc_rtx;
7345 struct induction *v;
7347 /* Make sure the iteration data is available. We must have
7348 constants in order to be certain of no overflow. */
7349 /* ??? An unknown iteration count with an increment of +-1
7350 combined with friendly exit tests against an invariant
7351 value is also amenable to optimization. Not implemented. */
7352 if (loop_info->n_iterations > 0
7353 && bl->initial_value
7354 && GET_CODE (bl->initial_value) == CONST_INT
7355 && (incr = biv_total_increment (bl))
7356 && GET_CODE (incr) == CONST_INT
7357 /* Make sure the host can represent the arithmetic. */
7358 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7360 unsigned HOST_WIDE_INT abs_incr, total_incr;
7361 HOST_WIDE_INT s_end_val;
7362 int neg_incr;
7364 info_ok = 1;
7365 start_val = INTVAL (bl->initial_value);
7366 u_start_val = start_val;
7368 neg_incr = 0, abs_incr = INTVAL (incr);
7369 if (INTVAL (incr) < 0)
7370 neg_incr = 1, abs_incr = -abs_incr;
7371 total_incr = abs_incr * loop_info->n_iterations;
7373 /* Check for host arithmetic overflow. */
7374 if (total_incr / loop_info->n_iterations == abs_incr)
7376 unsigned HOST_WIDE_INT u_max;
7377 HOST_WIDE_INT s_max;
7379 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7380 s_end_val = u_end_val;
7381 u_max = GET_MODE_MASK (biv_mode);
7382 s_max = u_max >> 1;
7384 /* Check zero extension of biv ok. */
7385 if (start_val >= 0
7386 /* Check for host arithmetic overflow. */
7387 && (neg_incr
7388 ? u_end_val < u_start_val
7389 : u_end_val > u_start_val)
7390 /* Check for target arithmetic overflow. */
7391 && (neg_incr
7392 ? 1 /* taken care of with host overflow */
7393 : u_end_val <= u_max))
7395 ze_ok = 1;
7398 /* Check sign extension of biv ok. */
7399 /* ??? While it is true that overflow with signed and pointer
7400 arithmetic is undefined, I fear too many programmers don't
7401 keep this fact in mind -- myself included on occasion.
7402 So leave the signed overflow optimizations alone. */
7403 if (start_val >= -s_max - 1
7404 /* Check for host arithmetic overflow. */
7405 && (neg_incr
7406 ? s_end_val < start_val
7407 : s_end_val > start_val)
7408 /* Check for target arithmetic overflow. */
7409 && (neg_incr
7410 ? s_end_val >= -s_max - 1
7411 : s_end_val <= s_max))
7413 se_ok = 1;
7418 /* Invalidate givs that fail the tests. */
7419 for (v = bl->giv; v; v = v->next_iv)
7420 if (v->ext_dependent)
7422 enum rtx_code code = GET_CODE (v->ext_dependent);
7423 int ok = 0;
7425 switch (code)
7427 case SIGN_EXTEND:
7428 ok = se_ok;
7429 break;
7430 case ZERO_EXTEND:
7431 ok = ze_ok;
7432 break;
7434 case TRUNCATE:
7435 /* We don't know whether this value is being used as either
7436 signed or unsigned, so to safely truncate we must satisfy
7437 both. The initial check here verifies the BIV itself;
7438 once that is successful we may check its range wrt the
7439 derived GIV. */
7440 if (se_ok && ze_ok)
7442 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7443 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7445 /* We know from the above that both endpoints are nonnegative,
7446 and that there is no wrapping. Verify that both endpoints
7447 are within the (signed) range of the outer mode. */
7448 if (u_start_val <= max && u_end_val <= max)
7449 ok = 1;
7451 break;
7453 default:
7454 abort ();
7457 if (ok)
7459 if (loop_dump_stream)
7461 fprintf (loop_dump_stream,
7462 "Verified ext dependent giv at %d of reg %d\n",
7463 INSN_UID (v->insn), bl->regno);
7466 else
7468 if (loop_dump_stream)
7470 const char *why;
7472 if (info_ok)
7473 why = "biv iteration values overflowed";
7474 else
7476 if (incr == pc_rtx)
7477 incr = biv_total_increment (bl);
7478 if (incr == const1_rtx)
7479 why = "biv iteration info incomplete; incr by 1";
7480 else
7481 why = "biv iteration info incomplete";
7484 fprintf (loop_dump_stream,
7485 "Failed ext dependent giv at %d, %s\n",
7486 INSN_UID (v->insn), why);
7488 v->ignore = 1;
7489 bl->all_reduced = 0;
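/* Worked example of the wrap check above (values assumed for
   illustration): a QImode biv starting at 250 with increment +1 and
   10 iterations has u_end_val = 260 > u_max = 255, so it wraps in the
   interior mode and ze_ok stays 0.  A derived zero-extended giv would
   then compute the wrong value, as this stand-alone sketch shows: */

static unsigned int
ze_wrap_example ()
{
  unsigned char biv = 250;      /* start_val = 250 in QImode */
  unsigned int giv = 0;         /* models (zero_extend:SI biv) */
  int i;
  for (i = 0; i < 10; i++)
    {
      biv += 1;                 /* wraps from 255 to 0 */
      giv = biv;
    }
  return giv;                   /* 4, not the expected 260 */
}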
7494 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7497 extend_value_for_giv (v, value)
7498 struct induction *v;
7499 rtx value;
7501 rtx ext_dep = v->ext_dependent;
7503 if (! ext_dep)
7504 return value;
7506 /* Recall that check_ext_dependent_givs verified that the known bounds
7507 of a biv did not overflow or wrap with respect to the extension for
7508 the giv. Therefore, constants need no additional adjustment. */
7509 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7510 return value;
7512 /* Otherwise, we must adjust the value to compensate for the
7513 differing modes of the biv and the giv. */
7514 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
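/* Illustrative example (assumed rtx values): if V->ext_dependent is
   (sign_extend:SI (reg:HI 60)) and VALUE is (reg:HI 100), the result
   above is (sign_extend:SI (reg:HI 100)).  */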
7517 struct combine_givs_stats
7519 int giv_number;
7520 int total_benefit;
7523 static int
7524 cmp_combine_givs_stats (xp, yp)
7525 const PTR xp;
7526 const PTR yp;
7528 const struct combine_givs_stats * const x =
7529 (const struct combine_givs_stats *) xp;
7530 const struct combine_givs_stats * const y =
7531 (const struct combine_givs_stats *) yp;
7532 int d;
7533 d = y->total_benefit - x->total_benefit;
7534 /* Stabilize the sort. */
7535 if (!d)
7536 d = x->giv_number - y->giv_number;
7537 return d;
7540 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7541 any other. If so, point SAME to the giv combined with and set NEW_REG to
7542 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7543 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7545 static void
7546 combine_givs (regs, bl)
7547 struct loop_regs *regs;
7548 struct iv_class *bl;
7550 /* Additional benefit to add for being combined multiple times. */
7551 const int extra_benefit = 3;
7553 struct induction *g1, *g2, **giv_array;
7554 int i, j, k, giv_count;
7555 struct combine_givs_stats *stats;
7556 rtx *can_combine;
7558 /* Count givs, because bl->giv_count is incorrect here. */
7559 giv_count = 0;
7560 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7561 if (!g1->ignore)
7562 giv_count++;
7564 giv_array
7565 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7566 i = 0;
7567 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7568 if (!g1->ignore)
7569 giv_array[i++] = g1;
7571 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7572 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7574 for (i = 0; i < giv_count; i++)
7576 int this_benefit;
7577 rtx single_use;
7579 g1 = giv_array[i];
7580 stats[i].giv_number = i;
7582 /* If a DEST_REG GIV is used only once, do not allow it to combine
7583 with anything, for in doing so we will gain nothing that cannot
7584 be had by simply letting the GIV with which we would have combined
7585 to be reduced on its own. The lossage shows up in particular with
7586 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7587 be seen elsewhere as well. */
7588 if (g1->giv_type == DEST_REG
7589 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7590 && single_use != const0_rtx)
7591 continue;
7593 this_benefit = g1->benefit;
7594 /* Add an additional weight for zero addends. */
7595 if (g1->no_const_addval)
7596 this_benefit += 1;
7598 for (j = 0; j < giv_count; j++)
7600 rtx this_combine;
7602 g2 = giv_array[j];
7603 if (g1 != g2
7604 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7606 can_combine[i * giv_count + j] = this_combine;
7607 this_benefit += g2->benefit + extra_benefit;
7610 stats[i].total_benefit = this_benefit;
7613 /* Iterate, combining until we can't. */
7614 restart:
7615 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7617 if (loop_dump_stream)
7619 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7620 for (k = 0; k < giv_count; k++)
7622 g1 = giv_array[stats[k].giv_number];
7623 if (!g1->combined_with && !g1->same)
7624 fprintf (loop_dump_stream, " {%d, %d}",
7625 INSN_UID (giv_array[stats[k].giv_number]->insn),
7626 stats[k].total_benefit);
7628 putc ('\n', loop_dump_stream);
7631 for (k = 0; k < giv_count; k++)
7633 int g1_add_benefit = 0;
7635 i = stats[k].giv_number;
7636 g1 = giv_array[i];
7638 /* If it has already been combined, skip. */
7639 if (g1->combined_with || g1->same)
7640 continue;
7642 for (j = 0; j < giv_count; j++)
7644 g2 = giv_array[j];
7645 if (g1 != g2 && can_combine[i * giv_count + j]
7646 /* If it has already been combined, skip. */
7647 && ! g2->same && ! g2->combined_with)
7649 int l;
7651 g2->new_reg = can_combine[i * giv_count + j];
7652 g2->same = g1;
7653 /* For destination, we now may replace by mem expression instead
7654 of register. This changes the costs considerably, so add the
7655 compensation. */
7656 if (g2->giv_type == DEST_ADDR)
7657 g2->benefit = (g2->benefit + reg_address_cost
7658 - address_cost (g2->new_reg,
7659 GET_MODE (g2->mem)));
7660 g1->combined_with++;
7661 g1->lifetime += g2->lifetime;
7663 g1_add_benefit += g2->benefit;
7665 /* ??? The new final_[bg]iv_value code does a much better job
7666 of finding replaceable giv's, and hence this code may no
7667 longer be necessary. */
7668 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7669 g1_add_benefit -= copy_cost;
7671 /* To help optimize the next set of combinations, remove
7672 this giv from the benefits of other potential mates. */
7673 for (l = 0; l < giv_count; ++l)
7675 int m = stats[l].giv_number;
7676 if (can_combine[m * giv_count + j])
7677 stats[l].total_benefit -= g2->benefit + extra_benefit;
7680 if (loop_dump_stream)
7681 fprintf (loop_dump_stream,
7682 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7683 INSN_UID (g2->insn), INSN_UID (g1->insn),
7684 g1->benefit, g1_add_benefit, g1->lifetime);
7688 /* To help optimize the next set of combinations, remove
7689 this giv from the benefits of other potential mates. */
7690 if (g1->combined_with)
7692 for (j = 0; j < giv_count; ++j)
7694 int m = stats[j].giv_number;
7695 if (can_combine[m * giv_count + i])
7696 stats[j].total_benefit -= g1->benefit + extra_benefit;
7699 g1->benefit += g1_add_benefit;
7701 /* We've finished with this giv, and everything it touched.
7702 Restart the combination so that proper weights for the
7703 rest of the givs are properly taken into account. */
7704 /* ??? Ideally we would compact the arrays at this point, so
7705 as to not cover old ground. But sanely compacting
7706 can_combine is tricky. */
7707 goto restart;
7711 /* Clean up. */
7712 free (stats);
7713 free (can_combine);
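/* A minimal stand-alone sketch of the greedy strategy used above (the
   arrays are illustrative stand-ins for the real induction structures;
   SAME must be initialized to -1 by the caller): absorb every
   compatible free partner into the current giv and restart after each
   success so that the updated benefits are taken into account.  The
   real code additionally sorts candidates by total benefit with qsort
   and compensates DEST_ADDR address costs.  */

#define SKETCH_GIVS 16

static void
greedy_combine_sketch (n, benefit, can, same)
int n;
int benefit[SKETCH_GIVS];
int can[SKETCH_GIVS][SKETCH_GIVS];
int same[SKETCH_GIVS];
{
  int i, j;

 restart:
  for (i = 0; i < n; i++)
    {
      if (same[i] >= 0)
        continue;                  /* already absorbed elsewhere */
      for (j = 0; j < n; j++)
        if (j != i && same[j] < 0 && can[i][j])
          {
            same[j] = i;           /* compute giv J in terms of giv I */
            benefit[i] += benefit[j];
            goto restart;          /* benefits changed; rescan */
          }
    }
}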
7716 /* Generate sequence for REG = B * M + A. */
7718 static rtx
7719 gen_add_mult (b, m, a, reg)
7720 rtx b; /* initial value of basic induction variable */
7721 rtx m; /* multiplicative constant */
7722 rtx a; /* additive constant */
7723 rtx reg; /* destination register */
7725 rtx seq;
7726 rtx result;
7728 start_sequence ();
7729 /* Use unsigned arithmetic. */
7730 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7731 if (reg != result)
7732 emit_move_insn (reg, result);
7733 seq = get_insns ();
7734 end_sequence ();
7736 return seq;
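/* Illustrative example (assumed operands): for B = (reg:SI 100),
   M = (const_int 4) and A = (const_int 5), the returned sequence sets
   REG to reg100 * 4 + 5; expand_mult_add will typically emit this as
   an ashift by 2 followed by a plus rather than as a real mult.  */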
7740 /* Update registers created in insn sequence SEQ. */
7742 static void
7743 loop_regs_update (loop, seq)
7744 const struct loop *loop ATTRIBUTE_UNUSED;
7745 rtx seq;
7747 rtx insn;
7749 /* Update register info for alias analysis. */
7751 if (seq == NULL_RTX)
7752 return;
7754 if (INSN_P (seq))
7756 insn = seq;
7757 while (insn != NULL_RTX)
7759 rtx set = single_set (insn);
7761 if (set && GET_CODE (SET_DEST (set)) == REG)
7762 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7764 insn = NEXT_INSN (insn);
7767 else if (GET_CODE (seq) == SET
7768 && GET_CODE (SET_DEST (seq)) == REG)
7769 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7773 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7775 void
7776 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7777 const struct loop *loop;
7778 rtx b; /* initial value of basic induction variable */
7779 rtx m; /* multiplicative constant */
7780 rtx a; /* additive constant */
7781 rtx reg; /* destination register */
7782 basic_block before_bb;
7783 rtx before_insn;
7785 rtx seq;
7787 if (! before_insn)
7789 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7790 return;
7793 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7794 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7796 /* Increase the lifetime of any invariants moved further in code. */
7797 update_reg_last_use (a, before_insn);
7798 update_reg_last_use (b, before_insn);
7799 update_reg_last_use (m, before_insn);
7801 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7803 /* It is possible that the expansion created lots of new registers.
7804 Iterate over the sequence we just created and record them all. */
7805 loop_regs_update (loop, seq);
7809 /* Emit insns in loop pre-header to set REG = B * M + A. */
7811 void
7812 loop_iv_add_mult_sink (loop, b, m, a, reg)
7813 const struct loop *loop;
7814 rtx b; /* initial value of basic induction variable */
7815 rtx m; /* multiplicative constant */
7816 rtx a; /* additive constant */
7817 rtx reg; /* destination register */
7819 rtx seq;
7821 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7822 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7824 /* Increase the lifetime of any invariants moved further in code.
7825 ???? Is this really necessary? */
7826 update_reg_last_use (a, loop->sink);
7827 update_reg_last_use (b, loop->sink);
7828 update_reg_last_use (m, loop->sink);
7830 loop_insn_sink (loop, seq);
7832 /* It is possible that the expansion created lots of new registers.
7833 Iterate over the sequence we just created and record them all. */
7834 loop_regs_update (loop, seq);
7838 /* Emit insns after loop to set REG = B * M + A. */
7840 void
7841 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7842 const struct loop *loop;
7843 rtx b; /* initial value of basic induction variable */
7844 rtx m; /* multiplicative constant */
7845 rtx a; /* additive constant */
7846 rtx reg; /* destination register */
7848 rtx seq;
7850 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7851 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7853 loop_insn_hoist (loop, seq);
7855 /* It is possible that the expansion created lots of new registers.
7856 Iterate over the sequence we just created and record them all. */
7857 loop_regs_update (loop, seq);
7862 /* Similar to gen_add_mult, but compute cost rather than generating
7863 sequence. */
7865 static int
7866 iv_add_mult_cost (b, m, a, reg)
7867 rtx b; /* initial value of basic induction variable */
7868 rtx m; /* multiplicative constant */
7869 rtx a; /* additive constant */
7870 rtx reg; /* destination register */
7872 int cost = 0;
7873 rtx last, result;
7875 start_sequence ();
7876 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7877 if (reg != result)
7878 emit_move_insn (reg, result);
7879 last = get_last_insn ();
7880 while (last)
7882 rtx t = single_set (last);
7883 if (t)
7884 cost += rtx_cost (SET_SRC (t), SET);
7885 last = PREV_INSN (last);
7887 end_sequence ();
7888 return cost;
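/* Example (illustrative): if the sequence generated for
   reg = b * 4 + a is an ashift followed by a plus, the cost returned
   above is the rtx_cost of the ashift source plus that of the plus
   source; a multiply that would expand to a library call costs far
   more, discouraging that reduction.  */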
7891 /* Test whether A * B can be computed without
7892 an actual multiply insn. Value is 1 if so.
7894 ??? This function stinks because it generates a ton of wasted RTL
7895 ??? and as a result fragments GC memory to no end. There are other
7896 ??? places in the compiler which are invoked a lot and do the same
7897 ??? thing, generate wasted RTL just to see if something is possible. */
7899 static int
7900 product_cheap_p (a, b)
7901 rtx a;
7902 rtx b;
7904 rtx tmp;
7905 int win, n_insns;
7907 /* If only one is constant, make it B. */
7908 if (GET_CODE (a) == CONST_INT)
7909 tmp = a, a = b, b = tmp;
7911 /* If the first is constant, both are constant, so we don't need a multiply. */
7912 if (GET_CODE (a) == CONST_INT)
7913 return 1;
7915 /* If the second is not constant, neither is, so we would need a multiply. */
7916 if (GET_CODE (b) != CONST_INT)
7917 return 0;
7919 /* One operand is constant, so might not need multiply insn. Generate the
7920 code for the multiply and see if a call or multiply, or long sequence
7921 of insns is generated. */
7923 start_sequence ();
7924 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7925 tmp = get_insns ();
7926 end_sequence ();
7928 win = 1;
7929 if (INSN_P (tmp))
7931 n_insns = 0;
7932 while (tmp != NULL_RTX)
7934 rtx next = NEXT_INSN (tmp);
7936 if (++n_insns > 3
7937 || GET_CODE (tmp) != INSN
7938 || (GET_CODE (PATTERN (tmp)) == SET
7939 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7940 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7941 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7942 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7944 win = 0;
7945 break;
7948 tmp = next;
7951 else if (GET_CODE (tmp) == SET
7952 && GET_CODE (SET_SRC (tmp)) == MULT)
7953 win = 0;
7954 else if (GET_CODE (tmp) == PARALLEL
7955 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7956 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7957 win = 0;
7959 return win;
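/* Illustrative example (target-dependent, so only a sketch): a small
   constant like 5 usually expands to the two-insn shift-and-add
   sequence below and is judged cheap, while a constant that needs a
   real mult insn, a call, or more than three insns is not.  */

static int
mult_by_5_sketch (a)
int a;
{
  return (a << 2) + a;   /* what expand_mult typically emits for a * 5 */
}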
7962 /* Check to see if loop can be terminated by a "decrement and branch until
7963 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7964 Also try reversing an increment loop to a decrement loop
7965 to see if the optimization can be performed.
7966 Value is nonzero if optimization was performed. */
7968 /* This is useful even if the architecture doesn't have such an insn,
7969 because it might change a loop which increments from 0 to n into a loop
7970 which decrements from n to 0. A loop that decrements to zero is usually
7971 faster than one that increments from zero. */
7973 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7974 such as approx_final_value, biv_total_increment, loop_iterations, and
7975 final_[bg]iv_value. */
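/* Source-level illustration (the actual transformation is done on
   RTL): a counting loop such as

       for (i = 0; i < n; i++)
         body ();

   whose induction variable has no use except counting can be rewritten
   as

       for (i = n; i > 0; i--)
         body ();

   so that the exit test compares against zero, or becomes a single
   decrement-and-branch instruction on machines like the m68k.  */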
7977 static int
7978 check_dbra_loop (loop, insn_count)
7979 struct loop *loop;
7980 int insn_count;
7982 struct loop_info *loop_info = LOOP_INFO (loop);
7983 struct loop_regs *regs = LOOP_REGS (loop);
7984 struct loop_ivs *ivs = LOOP_IVS (loop);
7985 struct iv_class *bl;
7986 rtx reg;
7987 rtx jump_label;
7988 rtx final_value;
7989 rtx start_value;
7990 rtx new_add_val;
7991 rtx comparison;
7992 rtx before_comparison;
7993 rtx p;
7994 rtx jump;
7995 rtx first_compare;
7996 int compare_and_branch;
7997 rtx loop_start = loop->start;
7998 rtx loop_end = loop->end;
8000 /* If last insn is a conditional branch, and the insn before tests a
8001 register value, try to optimize it. Otherwise, we can't do anything. */
8003 jump = PREV_INSN (loop_end);
8004 comparison = get_condition_for_loop (loop, jump);
8005 if (comparison == 0)
8006 return 0;
8007 if (!onlyjump_p (jump))
8008 return 0;
8010 /* Try to compute whether the compare/branch at the loop end is one or
8011 two instructions. */
8012 get_condition (jump, &first_compare);
8013 if (first_compare == jump)
8014 compare_and_branch = 1;
8015 else if (first_compare == prev_nonnote_insn (jump))
8016 compare_and_branch = 2;
8017 else
8018 return 0;
8021 /* If more than one condition is present to control the loop, then
8022 do not proceed, as this function does not know how to rewrite
8023 loop tests with more than one condition.
8025 Look backwards from the first insn in the last comparison
8026 sequence and see if we've got another comparison sequence. */
8028 rtx jump1;
8029 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8030 if (GET_CODE (jump1) == JUMP_INSN)
8031 return 0;
8034 /* Check all of the bivs to see if the compare uses one of them.
8035 Skip biv's set more than once because we can't guarantee that
8036 it will be zero on the last iteration. Also skip if the biv is
8037 used between its update and the test insn. */
8039 for (bl = ivs->list; bl; bl = bl->next)
8041 if (bl->biv_count == 1
8042 && ! bl->biv->maybe_multiple
8043 && bl->biv->dest_reg == XEXP (comparison, 0)
8044 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8045 first_compare))
8046 break;
8049 if (! bl)
8050 return 0;
8052 /* Look for the case where the basic induction variable is always
8053 nonnegative, and equals zero on the last iteration.
8054 In this case, add a reg_note REG_NONNEG, which allows the
8055 m68k DBRA instruction to be used. */
8057 if (((GET_CODE (comparison) == GT
8058 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8059 && INTVAL (XEXP (comparison, 1)) == -1)
8060 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8061 && GET_CODE (bl->biv->add_val) == CONST_INT
8062 && INTVAL (bl->biv->add_val) < 0)
8064 /* The initial value must be greater than 0, and
8065 init_val % -dec_value == 0 must hold to ensure that it equals zero on
8066 the last iteration. */
8068 if (GET_CODE (bl->initial_value) == CONST_INT
8069 && INTVAL (bl->initial_value) > 0
8070 && (INTVAL (bl->initial_value)
8071 % (-INTVAL (bl->biv->add_val))) == 0)
8073 /* register always nonnegative, add REG_NOTE to branch */
8074 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8075 REG_NOTES (jump)
8076 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8077 REG_NOTES (jump));
8078 bl->nonneg = 1;
8080 return 1;
8083 /* If the decrement is 1 and the value was tested as >= 0 before
8084 the loop, then we can safely optimize. */
8085 for (p = loop_start; p; p = PREV_INSN (p))
8087 if (GET_CODE (p) == CODE_LABEL)
8088 break;
8089 if (GET_CODE (p) != JUMP_INSN)
8090 continue;
8092 before_comparison = get_condition_for_loop (loop, p);
8093 if (before_comparison
8094 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8095 && GET_CODE (before_comparison) == LT
8096 && XEXP (before_comparison, 1) == const0_rtx
8097 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8098 && INTVAL (bl->biv->add_val) == -1)
8100 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8101 REG_NOTES (jump)
8102 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8103 REG_NOTES (jump));
8104 bl->nonneg = 1;
8106 return 1;
8110 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8111 && INTVAL (bl->biv->add_val) > 0)
8113 /* Try to change inc to dec, so can apply above optimization. */
8114 /* Can do this if:
8115 all registers modified are induction variables or invariant,
8116 all memory references have non-overlapping addresses
8117 (obviously true if only one write)
8118 allow 2 insns for the compare/jump at the end of the loop. */
8119 /* Also, we must avoid any instructions which use both the reversed
8120 biv and another biv. Such instructions will fail if the loop is
8121 reversed. We meet this condition by requiring that either
8122 no_use_except_counting is true, or else that there is only
8123 one biv. */
8124 int num_nonfixed_reads = 0;
8125 /* 1 if the iteration var is used only to count iterations. */
8126 int no_use_except_counting = 0;
8127 /* 1 if the loop has no memory store, or it has a single memory store
8128 which is reversible. */
8129 int reversible_mem_store = 1;
8131 if (bl->giv_count == 0
8132 && !loop->exit_count
8133 && !loop_info->has_multiple_exit_targets)
8135 rtx bivreg = regno_reg_rtx[bl->regno];
8136 struct iv_class *blt;
8138 /* If there are no givs for this biv, and the only exit is the
8139 fall through at the end of the loop, then
8140 see if perhaps there are no uses except to count. */
8141 no_use_except_counting = 1;
8142 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8143 if (INSN_P (p))
8145 rtx set = single_set (p);
8147 if (set && GET_CODE (SET_DEST (set)) == REG
8148 && REGNO (SET_DEST (set)) == bl->regno)
8149 /* An insn that sets the biv is okay. */
8151 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8152 || p == prev_nonnote_insn (loop_end))
8153 && reg_mentioned_p (bivreg, PATTERN (p)))
8155 /* If either of these insns uses the biv and sets a pseudo
8156 that has more than one usage, then the biv has uses
8157 other than counting since it's used to derive a value
8158 that is used more than one time. */
8159 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8160 regs);
8161 if (regs->multiple_uses)
8163 no_use_except_counting = 0;
8164 break;
8167 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8169 no_use_except_counting = 0;
8170 break;
8174 /* A biv has uses besides counting if it is used to set
8175 another biv. */
8176 for (blt = ivs->list; blt; blt = blt->next)
8177 if (blt->init_set
8178 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8180 no_use_except_counting = 0;
8181 break;
8185 if (no_use_except_counting)
8186 /* No need to worry about MEMs. */
8188 else if (loop_info->num_mem_sets <= 1)
8190 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8191 if (INSN_P (p))
8192 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8194 /* If the loop has a single store, and the destination address is
8195 invariant, then we can't reverse the loop, because this address
8196 might then have the wrong value at loop exit.
8197 This would work if the source was invariant also, however, in that
8198 case, the insn should have been moved out of the loop. */
8200 if (loop_info->num_mem_sets == 1)
8202 struct induction *v;
8204 /* If we could prove that each of the memory locations
8205 written to was different, then we could reverse the
8206 store -- but we don't presently have any way of
8207 knowing that. */
8208 reversible_mem_store = 0;
8210 /* If the store depends on a register that is set after the
8211 store, it depends on the initial value, and is thus not
8212 reversible. */
8213 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8215 if (v->giv_type == DEST_REG
8216 && reg_mentioned_p (v->dest_reg,
8217 PATTERN (loop_info->first_loop_store_insn))
8218 && loop_insn_first_p (loop_info->first_loop_store_insn,
8219 v->insn))
8220 reversible_mem_store = 0;
8224 else
8225 return 0;
8227 /* This code only acts for innermost loops. Also it simplifies
8228 the memory address check by only reversing loops with
8229 zero or one memory access.
8230 Two memory accesses could involve parts of the same array,
8231 and that can't be reversed.
8232 If the biv is used only for counting, then we don't need to worry
8233 about all these things. */
8235 if ((num_nonfixed_reads <= 1
8236 && ! loop_info->has_nonconst_call
8237 && ! loop_info->has_prefetch
8238 && ! loop_info->has_volatile
8239 && reversible_mem_store
8240 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8241 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8242 && (bl == ivs->list && bl->next == 0))
8243 || (no_use_except_counting && ! loop_info->has_prefetch))
8245 rtx tem;
8247 /* Loop can be reversed. */
8248 if (loop_dump_stream)
8249 fprintf (loop_dump_stream, "Can reverse loop\n");
8251 /* Now check other conditions:
8253 The increment must be a constant, as must the initial value,
8254 and the comparison code must be LT.
8256 This test can probably be improved since +/- 1 in the constant
8257 can be obtained by changing LT to LE and vice versa; this is
8258 confusing. */
8260 if (comparison
8261 /* for constants, LE gets turned into LT */
8262 && (GET_CODE (comparison) == LT
8263 || (GET_CODE (comparison) == LE
8264 && no_use_except_counting)))
8266 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8267 rtx initial_value, comparison_value;
8268 int nonneg = 0;
8269 enum rtx_code cmp_code;
8270 int comparison_const_width;
8271 unsigned HOST_WIDE_INT comparison_sign_mask;
8273 add_val = INTVAL (bl->biv->add_val);
8274 comparison_value = XEXP (comparison, 1);
8275 if (GET_MODE (comparison_value) == VOIDmode)
8276 comparison_const_width
8277 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8278 else
8279 comparison_const_width
8280 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8281 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8282 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8283 comparison_sign_mask
8284 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8286 /* If the comparison value is not a loop invariant, then we
8287 cannot reverse this loop.
8289 ??? If the insns which initialize the comparison value as
8290 a whole compute an invariant result, then we could move
8291 them out of the loop and proceed with loop reversal. */
8292 if (! loop_invariant_p (loop, comparison_value))
8293 return 0;
8295 if (GET_CODE (comparison_value) == CONST_INT)
8296 comparison_val = INTVAL (comparison_value);
8297 initial_value = bl->initial_value;
8299 /* Normalize the initial value if it is an integer and
8300 has no other use except as a counter. This will allow
8301 a few more loops to be reversed. */
8302 if (no_use_except_counting
8303 && GET_CODE (comparison_value) == CONST_INT
8304 && GET_CODE (initial_value) == CONST_INT)
8306 comparison_val = comparison_val - INTVAL (bl->initial_value);
8307 /* The code below requires comparison_val to be a multiple
8308 of add_val in order to do the loop reversal, so
8309 round up comparison_val to a multiple of add_val.
8310 Since comparison_value is constant, we know that the
8311 current comparison code is LT. */
8312 comparison_val = comparison_val + add_val - 1;
8313 comparison_val
8314 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8315 /* We postpone overflow checks for COMPARISON_VAL here;
8316 even if there is an overflow, we might still be able to
8317 reverse the loop, if converting the loop exit test to
8318 NE is possible. */
8319 initial_value = const0_rtx;
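/* Worked example of the normalization above (assumed values): with
   initial value 3, add_val 4 and the test (LT i 15), comparison_val
   becomes 15 - 3 = 12; rounding up, 12 + 4 - 1 = 15 and
   15 - (15 % 4) = 12, so the loop is treated as running from 0 to 12
   by steps of 4 -- the same three iterations as the original.  */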
8322 /* First check if we can do a vanilla loop reversal. */
8323 if (initial_value == const0_rtx
8324 /* If we have a decrement_and_branch_on_count,
8325 prefer the NE test, since this will allow that
8326 instruction to be generated. Note that we must
8327 use a vanilla loop reversal if the biv is used to
8328 calculate a giv or has a non-counting use. */
8329 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8330 && defined (HAVE_decrement_and_branch_on_count)
8331 && (! (add_val == 1 && loop->vtop
8332 && (bl->biv_count == 0
8333 || no_use_except_counting)))
8334 #endif
8335 && GET_CODE (comparison_value) == CONST_INT
8336 /* Now do postponed overflow checks on COMPARISON_VAL. */
8337 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8338 & comparison_sign_mask))
8340 /* Register will always be nonnegative, with value
8341 0 on last iteration */
8342 add_adjust = add_val;
8343 nonneg = 1;
8344 cmp_code = GE;
8346 else if (add_val == 1 && loop->vtop
8347 && (bl->biv_count == 0
8348 || no_use_except_counting))
8350 add_adjust = 0;
8351 cmp_code = NE;
8353 else
8354 return 0;
8356 if (GET_CODE (comparison) == LE)
8357 add_adjust -= add_val;
8359 /* If the initial value is not zero, or if the comparison
8360 value is not an exact multiple of the increment, then we
8361 cannot reverse this loop. */
8362 if (initial_value == const0_rtx
8363 && GET_CODE (comparison_value) == CONST_INT)
8365 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8366 return 0;
8368 else
8370 if (! no_use_except_counting || add_val != 1)
8371 return 0;
8374 final_value = comparison_value;
8376 /* Reset these in case we normalized the initial value
8377 and comparison value above. */
8378 if (GET_CODE (comparison_value) == CONST_INT
8379 && GET_CODE (initial_value) == CONST_INT)
8381 comparison_value = GEN_INT (comparison_val);
8382 final_value
8383 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8385 bl->initial_value = initial_value;
8387 /* Save some info needed to produce the new insns. */
8388 reg = bl->biv->dest_reg;
8389 jump_label = condjump_label (PREV_INSN (loop_end));
8390 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8392 /* Set start_value; if this is not a CONST_INT, we need
8393 to generate a SUB.
8394 Initialize biv to start_value before loop start.
8395 The old initializing insn will be deleted as a
8396 dead store by flow.c. */
8397 if (initial_value == const0_rtx
8398 && GET_CODE (comparison_value) == CONST_INT)
8400 start_value = GEN_INT (comparison_val - add_adjust);
8401 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8403 else if (GET_CODE (initial_value) == CONST_INT)
8405 enum machine_mode mode = GET_MODE (reg);
8406 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8407 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8409 if (add_insn == 0)
8410 return 0;
8412 start_value
8413 = gen_rtx_PLUS (mode, comparison_value, offset);
8414 loop_insn_hoist (loop, add_insn);
8415 if (GET_CODE (comparison) == LE)
8416 final_value = gen_rtx_PLUS (mode, comparison_value,
8417 GEN_INT (add_val));
8419 else if (! add_adjust)
8421 enum machine_mode mode = GET_MODE (reg);
8422 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8423 initial_value);
8425 if (sub_insn == 0)
8426 return 0;
8427 start_value
8428 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8429 loop_insn_hoist (loop, sub_insn);
8431 else
8432 /* We could handle the other cases too, but it'll be
8433 better to have a testcase first. */
8434 return 0;
8436 /* We may not have a single insn which can increment a reg, so
8437 create a sequence to hold all the insns from expand_inc. */
8438 start_sequence ();
8439 expand_inc (reg, new_add_val);
8440 tem = get_insns ();
8441 end_sequence ();
8443 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8444 delete_insn (bl->biv->insn);
8446 /* Update biv info to reflect its new status. */
8447 bl->biv->insn = p;
8448 bl->initial_value = start_value;
8449 bl->biv->add_val = new_add_val;
8451 /* Update loop info. */
8452 loop_info->initial_value = reg;
8453 loop_info->initial_equiv_value = reg;
8454 loop_info->final_value = const0_rtx;
8455 loop_info->final_equiv_value = const0_rtx;
8456 loop_info->comparison_value = const0_rtx;
8457 loop_info->comparison_code = cmp_code;
8458 loop_info->increment = new_add_val;
8460 /* Inc LABEL_NUSES so that delete_insn will
8461 not delete the label. */
8462 LABEL_NUSES (XEXP (jump_label, 0))++;
8464 /* Emit an insn after the end of the loop to set the biv's
8465 proper exit value if it is used anywhere outside the loop. */
8466 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8467 || ! bl->init_insn
8468 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8469 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8471 /* Delete compare/branch at end of loop. */
8472 delete_related_insns (PREV_INSN (loop_end));
8473 if (compare_and_branch == 2)
8474 delete_related_insns (first_compare);
8476 /* Add new compare/branch insn at end of loop. */
8477 start_sequence ();
8478 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8479 GET_MODE (reg), 0,
8480 XEXP (jump_label, 0));
8481 tem = get_insns ();
8482 end_sequence ();
8483 emit_jump_insn_before (tem, loop_end);
8485 for (tem = PREV_INSN (loop_end);
8486 tem && GET_CODE (tem) != JUMP_INSN;
8487 tem = PREV_INSN (tem))
8490 if (tem)
8491 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8493 if (nonneg)
8495 if (tem)
8497 /* Increment of LABEL_NUSES done above. */
8498 /* Register is now always nonnegative,
8499 so add REG_NONNEG note to the branch. */
8500 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8501 REG_NOTES (tem));
8503 bl->nonneg = 1;
8506 /* No insn may reference both the reversed and another biv or it
8507 will fail (see comment near the top of the loop reversal
8508 code).
8509 Earlier on, we have verified that the biv has no use except
8510 counting, or it is the only biv in this function.
8511 However, the code that computes no_use_except_counting does
8512 not verify reg notes. It's possible to have an insn that
8513 references another biv, and has a REG_EQUAL note with an
8514 expression based on the reversed biv. To avoid this case,
8515 remove all REG_EQUAL notes based on the reversed biv
8516 here. */
8517 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8518 if (INSN_P (p))
8520 rtx *pnote;
8521 rtx set = single_set (p);
8522 /* If this is a set of a GIV based on the reversed biv, any
8523 REG_EQUAL notes should still be correct. */
8524 if (! set
8525 || GET_CODE (SET_DEST (set)) != REG
8526 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8527 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8528 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8529 for (pnote = &REG_NOTES (p); *pnote;)
8531 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8532 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8533 XEXP (*pnote, 0)))
8534 *pnote = XEXP (*pnote, 1);
8535 else
8536 pnote = &XEXP (*pnote, 1);
8540 /* Mark that this biv has been reversed. Each giv which depends
8541 on this biv, and which is also live past the end of the loop
8542 will have to be fixed up. */
8544 bl->reversed = 1;
8546 if (loop_dump_stream)
8548 fprintf (loop_dump_stream, "Reversed loop");
8549 if (bl->nonneg)
8550 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8551 else
8552 fprintf (loop_dump_stream, "\n");
8555 return 1;
8560 return 0;
8563 /* Verify whether the biv BL appears to be eliminable,
8564 based on the insns in the loop that refer to it.
8566 If ELIMINATE_P is non-zero, actually do the elimination.
8568 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8569 determine whether invariant insns should be placed inside or at the
8570 start of the loop. */
8572 static int
8573 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8574 const struct loop *loop;
8575 struct iv_class *bl;
8576 int eliminate_p;
8577 int threshold, insn_count;
8579 struct loop_ivs *ivs = LOOP_IVS (loop);
8580 rtx reg = bl->biv->dest_reg;
8581 rtx p;
8583 /* Scan all insns in the loop, stopping if we find one that uses the
8584 biv in a way that we cannot eliminate. */
8586 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8588 enum rtx_code code = GET_CODE (p);
8589 basic_block where_bb = 0;
8590 rtx where_insn = threshold >= insn_count ? 0 : p;
8592 /* If this is a libcall that sets a giv, skip ahead to its end. */
8593 if (GET_RTX_CLASS (code) == 'i')
8595 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8597 if (note)
8599 rtx last = XEXP (note, 0);
8600 rtx set = single_set (last);
8602 if (set && GET_CODE (SET_DEST (set)) == REG)
8604 unsigned int regno = REGNO (SET_DEST (set));
8606 if (regno < ivs->n_regs
8607 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8608 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8609 p = last;
8613 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8614 && reg_mentioned_p (reg, PATTERN (p))
8615 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8616 eliminate_p, where_bb, where_insn))
8618 if (loop_dump_stream)
8619 fprintf (loop_dump_stream,
8620 "Cannot eliminate biv %d: biv used in insn %d.\n",
8621 bl->regno, INSN_UID (p));
8622 break;
8626 if (p == loop->end)
8628 if (loop_dump_stream)
8629 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8630 bl->regno, eliminate_p ? "was" : "can be");
8631 return 1;
8634 return 0;
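/* Source-level illustration of what elimination buys (hedged; the
   pass works on RTL): in

       for (i = 0; i < n; i++)
         p[i] = 0;

   once the address giv for p[i] is strength-reduced to a pointer that
   steps by the element size, the exit test i < n can be rewritten as a
   comparison of that pointer against p + n, and the biv I itself
   becomes dead and is eliminated.  */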
8637 /* INSN and REFERENCE are instructions in the same insn chain.
8638 Return non-zero if INSN is first. */
8641 loop_insn_first_p (insn, reference)
8642 rtx insn, reference;
8644 rtx p, q;
8646 for (p = insn, q = reference;;)
8648 /* Start with test for not first so that INSN == REFERENCE yields not
8649 first. */
8650 if (q == insn || ! p)
8651 return 0;
8652 if (p == reference || ! q)
8653 return 1;
8655 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8656 previous insn, hence the <= comparison below does not work if
8657 P is a note. */
8658 if (INSN_UID (p) < max_uid_for_loop
8659 && INSN_UID (q) < max_uid_for_loop
8660 && GET_CODE (p) != NOTE)
8661 return INSN_LUID (p) <= INSN_LUID (q);
8663 if (INSN_UID (p) >= max_uid_for_loop
8664 || GET_CODE (p) == NOTE)
8665 p = NEXT_INSN (p);
8666 if (INSN_UID (q) >= max_uid_for_loop)
8667 q = NEXT_INSN (q);
8671 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8672 the offset that we have to take into account due to auto-increment /
8673 giv derivation is zero. */
8674 static int
8675 biv_elimination_giv_has_0_offset (biv, giv, insn)
8676 struct induction *biv, *giv;
8677 rtx insn;
8679 /* If the giv V had the auto-inc address optimization applied
8680 to it, and INSN occurs between the giv insn and the biv
8681 insn, then we'd have to adjust the value used here.
8682 This is rare, so we don't bother to make this possible. */
8683 if (giv->auto_inc_opt
8684 && ((loop_insn_first_p (giv->insn, insn)
8685 && loop_insn_first_p (insn, biv->insn))
8686 || (loop_insn_first_p (biv->insn, insn)
8687 && loop_insn_first_p (insn, giv->insn))))
8688 return 0;
8690 return 1;
8693 /* If BL appears in X (part of the pattern of INSN), see if we can
8694 eliminate its use. If so, return 1. If not, return 0.
8696 If BIV does not appear in X, return 1.
8698 If ELIMINATE_P is non-zero, actually do the elimination.
8699 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8700 Depending on how many items have been moved out of the loop, it
8701 will either be before INSN (when WHERE_INSN is non-zero) or at the
8702 start of the loop (when WHERE_INSN is zero). */
8704 static int
8705 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8706 const struct loop *loop;
8707 rtx x, insn;
8708 struct iv_class *bl;
8709 int eliminate_p;
8710 basic_block where_bb;
8711 rtx where_insn;
8713 enum rtx_code code = GET_CODE (x);
8714 rtx reg = bl->biv->dest_reg;
8715 enum machine_mode mode = GET_MODE (reg);
8716 struct induction *v;
8717 rtx arg, tem;
8718 #ifdef HAVE_cc0
8719 rtx new;
8720 #endif
8721 int arg_operand;
8722 const char *fmt;
8723 int i, j;
8725 switch (code)
8727 case REG:
8728 /* If we haven't already been able to do something with this BIV,
8729 we can't eliminate it. */
8730 if (x == reg)
8731 return 0;
8732 return 1;
8734 case SET:
8735 /* If this sets the BIV, it is not a problem. */
8736 if (SET_DEST (x) == reg)
8737 return 1;
8739 /* If this is an insn that defines a giv, it is also ok because
8740 it will go away when the giv is reduced. */
8741 for (v = bl->giv; v; v = v->next_iv)
8742 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8743 return 1;
8745 #ifdef HAVE_cc0
8746 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8748 /* Can replace with any giv that was reduced and
8749 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8750 Require a constant for MULT_VAL, so we know it's nonzero.
8751 ??? We disable this optimization to avoid potential
8752 overflows. */
8754 for (v = bl->giv; v; v = v->next_iv)
8755 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8756 && v->add_val == const0_rtx
8757 && ! v->ignore && ! v->maybe_dead && v->always_computable
8758 && v->mode == mode
8759 && 0)
8761 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8762 continue;
8764 if (! eliminate_p)
8765 return 1;
8767 /* If the giv has the opposite direction of change,
8768 then reverse the comparison. */
8769 if (INTVAL (v->mult_val) < 0)
8770 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8771 const0_rtx, v->new_reg);
8772 else
8773 new = v->new_reg;
8775 /* We can probably test that giv's reduced reg. */
8776 if (validate_change (insn, &SET_SRC (x), new, 0))
8777 return 1;
8780 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8781 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8782 Require a constant for MULT_VAL, so we know it's nonzero.
8783 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8784 overflow problem. */
8786 for (v = bl->giv; v; v = v->next_iv)
8787 if (GET_CODE (v->mult_val) == CONST_INT
8788 && v->mult_val != const0_rtx
8789 && ! v->ignore && ! v->maybe_dead && v->always_computable
8790 && v->mode == mode
8791 && (GET_CODE (v->add_val) == SYMBOL_REF
8792 || GET_CODE (v->add_val) == LABEL_REF
8793 || GET_CODE (v->add_val) == CONST
8794 || (GET_CODE (v->add_val) == REG
8795 && REG_POINTER (v->add_val))))
8797 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8798 continue;
8800 if (! eliminate_p)
8801 return 1;
8803 /* If the giv has the opposite direction of change,
8804 then reverse the comparison. */
8805 if (INTVAL (v->mult_val) < 0)
8806 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8807 v->new_reg);
8808 else
8809 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8810 copy_rtx (v->add_val));
8812 /* Replace biv with the giv's reduced register. */
8813 update_reg_last_use (v->add_val, insn);
8814 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8815 return 1;
8817 /* Insn doesn't support that constant or invariant. Copy it
8818 into a register (it will be a loop invariant.) */
8819 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8821 loop_insn_emit_before (loop, 0, where_insn,
8822 gen_move_insn (tem,
8823 copy_rtx (v->add_val)));
8825 /* Substitute the new register for its invariant value in
8826 the compare expression. */
8827 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8828 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8829 return 1;
8832 #endif
8833 break;
8835 case COMPARE:
8836 case EQ: case NE:
8837 case GT: case GE: case GTU: case GEU:
8838 case LT: case LE: case LTU: case LEU:
8839 /* See if either argument is the biv. */
8840 if (XEXP (x, 0) == reg)
8841 arg = XEXP (x, 1), arg_operand = 1;
8842 else if (XEXP (x, 1) == reg)
8843 arg = XEXP (x, 0), arg_operand = 0;
8844 else
8845 break;
8847 if (CONSTANT_P (arg))
8849 /* First try to replace with any giv that has constant positive
8850 mult_val and constant add_val. We might be able to support
8851 negative mult_val, but it seems complex to do it in general. */
8853 for (v = bl->giv; v; v = v->next_iv)
8854 if (GET_CODE (v->mult_val) == CONST_INT
8855 && INTVAL (v->mult_val) > 0
8856 && (GET_CODE (v->add_val) == SYMBOL_REF
8857 || GET_CODE (v->add_val) == LABEL_REF
8858 || GET_CODE (v->add_val) == CONST
8859 || (GET_CODE (v->add_val) == REG
8860 && REG_POINTER (v->add_val)))
8861 && ! v->ignore && ! v->maybe_dead && v->always_computable
8862 && v->mode == mode)
8864 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8865 continue;
8867 /* Don't eliminate if the linear combination that makes up
8868 the giv overflows when it is applied to ARG. */
8869 if (GET_CODE (arg) == CONST_INT)
8871 rtx add_val;
8873 if (GET_CODE (v->add_val) == CONST_INT)
8874 add_val = v->add_val;
8875 else
8876 add_val = const0_rtx;
8878 if (const_mult_add_overflow_p (arg, v->mult_val,
8879 add_val, mode, 1))
8880 continue;
8883 if (! eliminate_p)
8884 return 1;
8886 /* Replace biv with the giv's reduced reg. */
8887 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8889 /* If all constants are actually constant integers and
8890 the derived constant can be directly placed in the COMPARE,
8891 do so. */
8892 if (GET_CODE (arg) == CONST_INT
8893 && GET_CODE (v->add_val) == CONST_INT)
8895 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8896 v->add_val, mode, 1);
8898 else
8900 /* Otherwise, load it into a register. */
8901 tem = gen_reg_rtx (mode);
8902 loop_iv_add_mult_emit_before (loop, arg,
8903 v->mult_val, v->add_val,
8904 tem, where_bb, where_insn);
8907 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8909 if (apply_change_group ())
8910 return 1;
8913 /* Look for giv with positive constant mult_val and nonconst add_val.
8914 Insert insns to calculate new compare value.
8915 ??? Turn this off due to possible overflow. */
8917 for (v = bl->giv; v; v = v->next_iv)
8918 if (GET_CODE (v->mult_val) == CONST_INT
8919 && INTVAL (v->mult_val) > 0
8920 && ! v->ignore && ! v->maybe_dead && v->always_computable
8921 && v->mode == mode
8922 && 0)
8924 rtx tem;
8926 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8927 continue;
8929 if (! eliminate_p)
8930 return 1;
8932 tem = gen_reg_rtx (mode);
8934 /* Replace biv with giv's reduced register. */
8935 validate_change (insn, &XEXP (x, 1 - arg_operand),
8936 v->new_reg, 1);
8938 /* Compute value to compare against. */
8939 loop_iv_add_mult_emit_before (loop, arg,
8940 v->mult_val, v->add_val,
8941 tem, where_bb, where_insn);
8942 /* Use it in this insn. */
8943 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8944 if (apply_change_group ())
8945 return 1;
8948 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8950 if (loop_invariant_p (loop, arg) == 1)
8952 /* Look for giv with constant positive mult_val and nonconst
8953 add_val. Insert insns to compute new compare value.
8954 ??? Turn this off due to possible overflow. */
8956 for (v = bl->giv; v; v = v->next_iv)
8957 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8958 && ! v->ignore && ! v->maybe_dead && v->always_computable
8959 && v->mode == mode
8960 && 0)
8962 rtx tem;
8964 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8965 continue;
8967 if (! eliminate_p)
8968 return 1;
8970 tem = gen_reg_rtx (mode);
8972 /* Replace biv with giv's reduced register. */
8973 validate_change (insn, &XEXP (x, 1 - arg_operand),
8974 v->new_reg, 1);
8976 /* Compute value to compare against. */
8977 loop_iv_add_mult_emit_before (loop, arg,
8978 v->mult_val, v->add_val,
8979 tem, where_bb, where_insn);
8980 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8981 if (apply_change_group ())
8982 return 1;
8986 /* This code has problems. Basically, when deciding whether we
8987 will eliminate BL, you can't know whether a particular giv
8988 of ARG will be reduced. If it isn't going to be reduced,
8989 we can't eliminate BL. We can try forcing it to be reduced,
8990 but that can generate poor code.
8992 The problem is that the benefit of reducing TV, below, should
8993 be increased if BL can actually be eliminated, but this means
8994 we might have to do a topological sort of the order in which
8995 we try to process biv. It doesn't seem worthwhile to do
8996 this sort of thing now. */
8998 #if 0
8999 /* Otherwise the reg compared with had better be a biv. */
9000 if (GET_CODE (arg) != REG
9001 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9002 return 0;
9004 /* Look for a pair of givs, one for each biv,
9005 with identical coefficients. */
9006 for (v = bl->giv; v; v = v->next_iv)
9008 struct induction *tv;
9010 if (v->ignore || v->maybe_dead || v->mode != mode)
9011 continue;
9013 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9014 tv = tv->next_iv)
9015 if (! tv->ignore && ! tv->maybe_dead
9016 && rtx_equal_p (tv->mult_val, v->mult_val)
9017 && rtx_equal_p (tv->add_val, v->add_val)
9018 && tv->mode == mode)
9020 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9021 continue;
9023 if (! eliminate_p)
9024 return 1;
9026 /* Replace biv with its giv's reduced reg. */
9027 XEXP (x, 1 - arg_operand) = v->new_reg;
9028 /* Replace other operand with the other giv's
9029 reduced reg. */
9030 XEXP (x, arg_operand) = tv->new_reg;
9031 return 1;
9034 #endif
9037 /* If we get here, the biv can't be eliminated. */
9038 return 0;
9040 case MEM:
9041 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9042 biv is used in it, since it will be replaced. */
9043 for (v = bl->giv; v; v = v->next_iv)
9044 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9045 return 1;
9046 break;
9048 default:
9049 break;
9052 /* See if any subexpression fails elimination. */
9053 fmt = GET_RTX_FORMAT (code);
9054 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9056 switch (fmt[i])
9058 case 'e':
9059 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9060 eliminate_p, where_bb, where_insn))
9061 return 0;
9062 break;
9064 case 'E':
9065 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9066 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9067 eliminate_p, where_bb, where_insn))
9068 return 0;
9069 break;
9073 return 1;
9076 /* Return nonzero if the last use of REG
9077 is in an insn following INSN in the same basic block. */
9079 static int
9080 last_use_this_basic_block (reg, insn)
9081 rtx reg;
9082 rtx insn;
9084 rtx n;
9085 for (n = insn;
9086 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9087 n = NEXT_INSN (n))
9089 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9090 return 1;
9092 return 0;
9095 /* Called via `note_stores' to record the initial value of a biv. Here we
9096 just record the location of the set and process it later. */
9098 static void
9099 record_initial (dest, set, data)
9100 rtx dest;
9101 rtx set;
9102 void *data ATTRIBUTE_UNUSED;
9104 struct loop_ivs *ivs = (struct loop_ivs *) data;
9105 struct iv_class *bl;
9107 if (GET_CODE (dest) != REG
9108 || REGNO (dest) >= ivs->n_regs
9109 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9110 return;
9112 bl = REG_IV_CLASS (ivs, REGNO (dest));
9114 /* If this is the first set found, record it. */
9115 if (bl->init_insn == 0)
9117 bl->init_insn = note_insn;
9118 bl->init_set = set;
9122 /* If any of the registers in X are "old" and currently have a last use earlier
9123 than INSN, update them to have a last use of INSN. Their actual last use
9124 will be the previous insn but it will not have a valid uid_luid so we can't
9125 use it. X must be a source expression only. */
9127 static void
9128 update_reg_last_use (x, insn)
9129 rtx x;
9130 rtx insn;
9132 /* Check for the case where INSN does not have a valid luid. In this case,
9133 there is no need to modify the regno_last_uid, as this can only happen
9134 when code is inserted after the loop_end to set a pseudo's final value,
9135 and hence this insn will never be the last use of x.
9136 ???? This comment is not correct. See for example loop_givs_reduce.
9137 This may insert an insn before another new insn. */
9138 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9139 && INSN_UID (insn) < max_uid_for_loop
9140 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9142 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9144 else
9146 int i, j;
9147 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9148 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9150 if (fmt[i] == 'e')
9151 update_reg_last_use (XEXP (x, i), insn);
9152 else if (fmt[i] == 'E')
9153 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9154 update_reg_last_use (XVECEXP (x, i, j), insn);
9159 /* Given an insn INSN and condition COND, return the condition in a
9160 canonical form to simplify testing by callers. Specifically:
9162 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9163 (2) Both operands will be machine operands; (cc0) will have been replaced.
9164 (3) If an operand is a constant, it will be the second operand.
9165 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9166 for GE, GEU, and LEU.
9168 If the condition cannot be understood, or is an inequality floating-point
9169 comparison which needs to be reversed, 0 will be returned.
9171 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9173 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9174 insn used in locating the condition was found. If a replacement test
9175 of the condition is desired, it should be placed in front of that
9176 insn and we will be sure that the inputs are still valid.
9178 If WANT_REG is non-zero, we wish the condition to be relative to that
9179 register, if possible. Therefore, do not canonicalize the condition
9180 further. */
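/* Examples (illustrative): (LE (reg 100) (const_int 4)) is returned
   as (LT (reg 100) (const_int 5)); (GT (const_int 0) (reg 100)) is
   swapped to (LT (reg 100) (const_int 0)); and a branch on
   (ne (cc0) (const_int 0)) is traced back to the compare insn that
   set cc0, so that both operands are machine operands.  */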
9183 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9184 rtx insn;
9185 rtx cond;
9186 int reverse;
9187 rtx *earliest;
9188 rtx want_reg;
9190 enum rtx_code code;
9191 rtx prev = insn;
9192 rtx set;
9193 rtx tem;
9194 rtx op0, op1;
9195 int reverse_code = 0;
9196 enum machine_mode mode;
9198 code = GET_CODE (cond);
9199 mode = GET_MODE (cond);
9200 op0 = XEXP (cond, 0);
9201 op1 = XEXP (cond, 1);
9203 if (reverse)
9204 code = reversed_comparison_code (cond, insn);
9205 if (code == UNKNOWN)
9206 return 0;
9208 if (earliest)
9209 *earliest = insn;
9211 /* If we are comparing a register with zero, see if the register is set
9212 in the previous insn to a COMPARE or a comparison operation. Perform
9213 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9214 in cse.c */
9216 while (GET_RTX_CLASS (code) == '<'
9217 && op1 == CONST0_RTX (GET_MODE (op0))
9218 && op0 != want_reg)
9220 /* Set non-zero when we find something of interest. */
9221 rtx x = 0;
9223 #ifdef HAVE_cc0
9224 /* If comparison with cc0, import actual comparison from compare
9225 insn. */
9226 if (op0 == cc0_rtx)
9228 if ((prev = prev_nonnote_insn (prev)) == 0
9229 || GET_CODE (prev) != INSN
9230 || (set = single_set (prev)) == 0
9231 || SET_DEST (set) != cc0_rtx)
9232 return 0;
9234 op0 = SET_SRC (set);
9235 op1 = CONST0_RTX (GET_MODE (op0));
9236 if (earliest)
9237 *earliest = prev;
9239 #endif
9241 /* If this is a COMPARE, pick up the two things being compared. */
9242 if (GET_CODE (op0) == COMPARE)
9244 op1 = XEXP (op0, 1);
9245 op0 = XEXP (op0, 0);
9246 continue;
9248 else if (GET_CODE (op0) != REG)
9249 break;
9251 /* Go back to the previous insn. Stop if it is not an INSN. We also
9252 stop if it isn't a single set or if it has a REG_INC note because
9253 we don't want to bother dealing with it. */
9255 if ((prev = prev_nonnote_insn (prev)) == 0
9256 || GET_CODE (prev) != INSN
9257 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9258 break;
9260 set = set_of (op0, prev);
9262 if (set
9263 && (GET_CODE (set) != SET
9264 || !rtx_equal_p (SET_DEST (set), op0)))
9265 break;
9267 /* If this is setting OP0, get what it sets it to if it looks
9268 relevant. */
9269 if (set)
9271 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9273 /* ??? We may not combine comparisons done in a CCmode with
9274 comparisons not done in a CCmode. This is to aid targets
9275 like Alpha that have an IEEE compliant EQ instruction, and
9276 a non-IEEE compliant BEQ instruction. The use of CCmode is
9277 actually artificial, simply to prevent the combination, but
9278 should not affect other platforms.
9280 However, we must allow VOIDmode comparisons to match either
9281 CCmode or non-CCmode comparison, because some ports have
9282 modeless comparisons inside branch patterns.
9284 ??? This mode check should perhaps look more like the mode check
9285 in simplify_comparison in combine. */
9287 if ((GET_CODE (SET_SRC (set)) == COMPARE
9288 || (((code == NE
9289 || (code == LT
9290 && GET_MODE_CLASS (inner_mode) == MODE_INT
9291 && (GET_MODE_BITSIZE (inner_mode)
9292 <= HOST_BITS_PER_WIDE_INT)
9293 && (STORE_FLAG_VALUE
9294 & ((HOST_WIDE_INT) 1
9295 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9296 #ifdef FLOAT_STORE_FLAG_VALUE
9297 || (code == LT
9298 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9299 && (REAL_VALUE_NEGATIVE
9300 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9301 #endif
9303 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9304 && (((GET_MODE_CLASS (mode) == MODE_CC)
9305 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9306 || mode == VOIDmode || inner_mode == VOIDmode))
9307 x = SET_SRC (set);
9308 else if (((code == EQ
9309 || (code == GE
9310 && (GET_MODE_BITSIZE (inner_mode)
9311 <= HOST_BITS_PER_WIDE_INT)
9312 && GET_MODE_CLASS (inner_mode) == MODE_INT
9313 && (STORE_FLAG_VALUE
9314 & ((HOST_WIDE_INT) 1
9315 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9316 #ifdef FLOAT_STORE_FLAG_VALUE
9317 || (code == GE
9318 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9319 && (REAL_VALUE_NEGATIVE
9320 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9321 #endif
9323 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9324 && (((GET_MODE_CLASS (mode) == MODE_CC)
9325 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9326 || mode == VOIDmode || inner_mode == VOIDmode))
9329 reverse_code = 1;
9330 x = SET_SRC (set);
9332 else
9333 break;
9336 else if (reg_set_p (op0, prev))
9337 /* If this sets OP0, but not directly, we have to give up. */
9338 break;
9340 if (x)
9342 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9343 code = GET_CODE (x);
9344 if (reverse_code)
9346 code = reversed_comparison_code (x, prev);
9347 if (code == UNKNOWN)
9348 return 0;
9349 reverse_code = 0;
9352 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9353 if (earliest)
9354 *earliest = prev;
9358 /* If constant is first, put it last. */
9359 if (CONSTANT_P (op0))
9360 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9362 /* If OP0 is the result of a comparison, we weren't able to find what
9363 was really being compared, so fail. */
9364 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9365 return 0;
9367 /* Canonicalize any ordered comparison with integers involving equality
9368 if we can do computations in the relevant mode and we do not
9369 overflow. */
9371 if (GET_CODE (op1) == CONST_INT
9372 && GET_MODE (op0) != VOIDmode
9373 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9375 HOST_WIDE_INT const_val = INTVAL (op1);
9376 unsigned HOST_WIDE_INT uconst_val = const_val;
9377 unsigned HOST_WIDE_INT max_val
9378 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9380 switch (code)
9382 case LE:
9383 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9384 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9385 break;
9387 /* When cross-compiling, const_val might be sign-extended from
9388 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9389 case GE:
9390 if ((HOST_WIDE_INT) (const_val & max_val)
9391 != (((HOST_WIDE_INT) 1
9392 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9393 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9394 break;
9396 case LEU:
9397 if (uconst_val < max_val)
9398 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9399 break;
9401 case GEU:
9402 if (uconst_val != 0)
9403 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9404 break;
9406 default:
9407 break;
9411 #ifdef HAVE_cc0
9412 /* Never return CC0; return zero instead. */
9413 if (op0 == cc0_rtx)
9414 return 0;
9415 #endif
9417 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
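/* Illustrative sketch, not part of loop.c: rule (4) above, the
   constant canonicalization done by the switch at the end of
   canonicalize_condition, modeled standalone on a 32-bit int mode.
   The enum and function names are hypothetical; only the rewrite
   rules and their overflow guards mirror the pass.  */

#include <limits.h>

enum cmp_code { CMP_LT, CMP_LE, CMP_GT, CMP_GE,
                CMP_LTU, CMP_LEU, CMP_GTU, CMP_GEU };

/* Turn LE/GE/LEU/GEU applied to (x CODE *CST) into the strict forms
   whenever adjusting the constant cannot overflow.  */
static enum cmp_code
canon_int_compare (enum cmp_code code, int *cst)
{
  switch (code)
    {
    case CMP_LE:           /* x <= c  ==>  x < c+1, unless c == INT_MAX.  */
      if (*cst != INT_MAX)
        (*cst)++, code = CMP_LT;
      break;
    case CMP_GE:           /* x >= c  ==>  x > c-1, unless c == INT_MIN.  */
      if (*cst != INT_MIN)
        (*cst)--, code = CMP_GT;
      break;
    case CMP_LEU:          /* unsigned: x <= c  ==>  x < c+1, c != ~0U.  */
      if ((unsigned int) *cst != UINT_MAX)
        *cst = (int) ((unsigned int) *cst + 1), code = CMP_LTU;
      break;
    case CMP_GEU:          /* unsigned: x >= c  ==>  x > c-1, c != 0.  */
      if (*cst != 0)
        *cst = (int) ((unsigned int) *cst - 1), code = CMP_GTU;
      break;
    default:
      break;
    }
  return code;
}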
9420 /* Given a jump insn JUMP, return the condition that will cause it to branch
9421 to its JUMP_LABEL. If the condition cannot be understood, or is an
9422 inequality floating-point comparison which needs to be reversed, 0 will
9423 be returned.
9425 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9426 insn used in locating the condition was found. If a replacement test
9427 of the condition is desired, it should be placed in front of that
9428 insn and we will be sure that the inputs are still valid. */
9430 rtx
9431 get_condition (jump, earliest)
9432 rtx jump;
9433 rtx *earliest;
9435 rtx cond;
9436 int reverse;
9437 rtx set;
9439 /* If this is not a standard conditional jump, we can't parse it. */
9440 if (GET_CODE (jump) != JUMP_INSN
9441 || ! any_condjump_p (jump))
9442 return 0;
9443 set = pc_set (jump);
9445 cond = XEXP (SET_SRC (set), 0);
9447 /* If this branches to JUMP_LABEL when the condition is false, reverse
9448 the condition. */
9449 reverse
9450 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9451 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9453 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
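/* Illustrative sketch, not part of loop.c: a runnable truth table for
   the REVERSE computation above.  A condjump is (set (pc)
   (if_then_else COND (label_ref L) (pc))) when it branches on COND
   true; when the label sits in the else arm instead, the branch is
   taken on COND false, so the condition must be reversed.  All names
   below are hypothetical.  */

#include <assert.h>

/* Nonzero iff the jump is taken: control follows the arm selected by
   COND, and the jump is taken when that arm holds the label.  */
static int
branch_taken (int cond, int label_in_then_arm)
{
  return cond ? label_in_then_arm : !label_in_then_arm;
}

int
main (void)
{
  /* Label in the "then" arm: taken exactly when COND holds.  */
  assert (branch_taken (1, 1) && ! branch_taken (0, 1));
  /* Label in the "else" arm: taken exactly when COND fails, which is
     why get_condition hands canonicalize_condition REVERSE == 1.  */
  assert (branch_taken (0, 0) && ! branch_taken (1, 0));
  return 0;
}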
9456 /* Similar to above routine, except that we also put an invariant last
9457 unless both operands are invariants. */
9459 static rtx
9460 get_condition_for_loop (loop, x)
9461 const struct loop *loop;
9462 rtx x;
9464 rtx comparison = get_condition (x, (rtx*) 0);
9466 if (comparison == 0
9467 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9468 || loop_invariant_p (loop, XEXP (comparison, 1)))
9469 return comparison;
9471 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9472 XEXP (comparison, 1), XEXP (comparison, 0));
9475 /* Scan the function and determine whether it has indirect (computed) jumps.
9477 This is taken mostly from flow.c; similar code exists elsewhere
9478 in the compiler. It may be useful to put this into rtlanal.c. */
9479 static int
9480 indirect_jump_in_function_p (start)
9481 rtx start;
9483 rtx insn;
9485 for (insn = start; insn; insn = NEXT_INSN (insn))
9486 if (computed_jump_p (insn))
9487 return 1;
9489 return 0;
9492 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9493 documentation for LOOP_MEMS for the definition of `appropriate'.
9494 This function is called from prescan_loop via for_each_rtx. */
9496 static int
9497 insert_loop_mem (mem, data)
9498 rtx *mem;
9499 void *data ATTRIBUTE_UNUSED;
9501 struct loop_info *loop_info = data;
9502 int i;
9503 rtx m = *mem;
9505 if (m == NULL_RTX)
9506 return 0;
9508 switch (GET_CODE (m))
9510 case MEM:
9511 break;
9513 case CLOBBER:
9514 /* We're not interested in MEMs that are only clobbered. */
9515 return -1;
9517 case CONST_DOUBLE:
9518 /* We're not interested in the MEM associated with a
9519 CONST_DOUBLE, so there's no need to traverse into this. */
9520 return -1;
9522 case EXPR_LIST:
9523 /* We're not interested in any MEMs that only appear in notes. */
9524 return -1;
9526 default:
9527 /* This is not a MEM. */
9528 return 0;
9531 /* See if we've already seen this MEM. */
9532 for (i = 0; i < loop_info->mems_idx; ++i)
9533 if (rtx_equal_p (m, loop_info->mems[i].mem))
9535 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9536 /* The modes of the two memory accesses are different. If
9537 this happens, something tricky is going on, and we just
9538 don't optimize accesses to this MEM. */
9539 loop_info->mems[i].optimize = 0;
9541 return 0;
9544 /* Resize the array, if necessary. */
9545 if (loop_info->mems_idx == loop_info->mems_allocated)
9547 if (loop_info->mems_allocated != 0)
9548 loop_info->mems_allocated *= 2;
9549 else
9550 loop_info->mems_allocated = 32;
9552 loop_info->mems = (loop_mem_info *)
9553 xrealloc (loop_info->mems,
9554 loop_info->mems_allocated * sizeof (loop_mem_info));
9557 /* Actually insert the MEM. */
9558 loop_info->mems[loop_info->mems_idx].mem = m;
9559 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9560 because we can't put it in a register. We still store it in the
9561 table, though, so that if we see the same address later, but in a
9562 non-BLK mode, we'll not think we can optimize it at that point. */
9563 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9564 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9565 ++loop_info->mems_idx;
9567 return 0;
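/* Illustrative sketch, not part of loop.c: the growth policy above
   (first allocation 32 slots, then doubling) makes the amortized
   xrealloc cost per insertion constant.  The types below are
   hypothetical stand-ins for loop_mem_info and struct loop_info.  */

#include <stdlib.h>

struct mem_entry { void *mem; int optimize; };

struct mem_table
{
  struct mem_entry *array;      /* starts NULL  */
  int idx;                      /* entries in use (mems_idx)  */
  int allocated;                /* capacity (mems_allocated)  */
};

/* Append *E, growing ARRAY exactly as insert_loop_mem grows
   loop_info->mems.  */
static void
mem_table_push (struct mem_table *t, const struct mem_entry *e)
{
  if (t->idx == t->allocated)
    {
      t->allocated = t->allocated ? t->allocated * 2 : 32;
      t->array = (struct mem_entry *)
        realloc (t->array, t->allocated * sizeof (*t->array));
      if (! t->array)
        abort ();
    }
  t->array[t->idx++] = *e;
}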
9571 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9573 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9574 register that is modified by an insn between FROM and TO. If the
9575 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9576 more, stop incrementing it, to avoid overflow.
9578 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9579 register I is used, if it is only used once. Otherwise, it is set
9580 to 0 (for no uses) or const0_rtx for more than one use. This
9581 parameter may be zero, in which case this processing is not done.
9583 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9584 optimize register I. */
9586 static void
9587 loop_regs_scan (loop, extra_size)
9588 const struct loop *loop;
9589 int extra_size;
9591 struct loop_regs *regs = LOOP_REGS (loop);
9592 int old_nregs;
9593 /* last_set[n] is nonzero iff reg n has been set in the current
9594 basic block. In that case, it is the insn that last set reg n. */
9595 rtx *last_set;
9596 rtx insn;
9597 int i;
9599 old_nregs = regs->num;
9600 regs->num = max_reg_num ();
9602 /* Grow the regs array if not allocated or too small. */
9603 if (regs->num >= regs->size)
9605 regs->size = regs->num + extra_size;
9607 regs->array = (struct loop_reg *)
9608 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9610 /* Zero the new elements. */
9611 memset (regs->array + old_nregs, 0,
9612 (regs->size - old_nregs) * sizeof (*regs->array));
9615 /* Clear previously scanned fields but do not clear n_times_set. */
9616 for (i = 0; i < old_nregs; i++)
9618 regs->array[i].set_in_loop = 0;
9619 regs->array[i].may_not_optimize = 0;
9620 regs->array[i].single_usage = NULL_RTX;
9623 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9625 /* Scan the loop, recording register usage. */
9626 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9627 insn = NEXT_INSN (insn))
9629 if (INSN_P (insn))
9631 /* Record registers that have exactly one use. */
9632 find_single_use_in_loop (regs, insn, PATTERN (insn));
9634 /* Include uses in REG_EQUAL notes. */
9635 if (REG_NOTES (insn))
9636 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9638 if (GET_CODE (PATTERN (insn)) == SET
9639 || GET_CODE (PATTERN (insn)) == CLOBBER)
9640 count_one_set (regs, insn, PATTERN (insn), last_set);
9641 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9643 int i;
9644 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9645 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9646 last_set);
9650 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9651 memset (last_set, 0, regs->num * sizeof (rtx));
9654 /* Invalidate all hard registers clobbered by calls. With one exception:
9655 a call-clobbered PIC register is still function-invariant for our
9656 purposes, since we can hoist any PIC calculations out of the loop.
9657 Thus the call to rtx_varies_p. */
9658 if (LOOP_INFO (loop)->has_call)
9659 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9660 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9661 && rtx_varies_p (regno_reg_rtx[i], 1))
9663 regs->array[i].may_not_optimize = 1;
9664 regs->array[i].set_in_loop = 1;
9667 #ifdef AVOID_CCMODE_COPIES
9668 /* Don't try to move insns which set CC registers if we should not
9669 create CCmode register copies. */
9670 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9671 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9672 regs->array[i].may_not_optimize = 1;
9673 #endif
9675 /* Set regs->array[I].n_times_set for the new registers. */
9676 for (i = old_nregs; i < regs->num; i++)
9677 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9679 free (last_set);
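/* Illustrative sketch, not part of loop.c: SET_IN_LOOP is clamped at
   127 because it lives in a narrow signed field; without the clamp,
   the 128th set would wrap the count negative, and later tests such
   as "set exactly once" could misfire.  The field width here is an
   assumption carried over from the comment above.  */

/* Count one more set of a register, saturating at 127.  */
static void
count_set_saturating (signed char *set_in_loop)
{
  if (*set_in_loop < 127)
    (*set_in_loop)++;
}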
9682 /* Returns the number of real INSNs in the LOOP. */
9684 static int
9685 count_insns_in_loop (loop)
9686 const struct loop *loop;
9688 int count = 0;
9689 rtx insn;
9691 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9692 insn = NEXT_INSN (insn))
9693 if (INSN_P (insn))
9694 ++count;
9696 return count;
9699 /* Move MEMs into registers for the duration of the loop. */
9701 static void
9702 load_mems (loop)
9703 const struct loop *loop;
9705 struct loop_info *loop_info = LOOP_INFO (loop);
9706 struct loop_regs *regs = LOOP_REGS (loop);
9707 int maybe_never = 0;
9708 int i;
9709 rtx p, prev_ebb_head;
9710 rtx label = NULL_RTX;
9711 rtx end_label;
9712 /* Nonzero if the next instruction may never be executed. */
9713 int next_maybe_never = 0;
9714 unsigned int last_max_reg = max_reg_num ();
9716 if (loop_info->mems_idx == 0)
9717 return;
9719 /* We cannot use next_label here because it skips over normal insns. */
9720 end_label = next_nonnote_insn (loop->end);
9721 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9722 end_label = NULL_RTX;
9724 /* Check to see if it's possible that some instructions in the loop are
9725 never executed. Also check if there is a goto out of the loop other
9726 than right after the end of the loop. */
9727 for (p = next_insn_in_loop (loop, loop->scan_start);
9728 p != NULL_RTX;
9729 p = next_insn_in_loop (loop, p))
9731 if (GET_CODE (p) == CODE_LABEL)
9732 maybe_never = 1;
9733 else if (GET_CODE (p) == JUMP_INSN
9734 /* If we enter the loop in the middle, and scan
9735 around to the beginning, don't set maybe_never
9736 for that. This must be an unconditional jump,
9737 otherwise the code at the top of the loop might
9738 never be executed. Unconditional jumps are
9739 followed by a barrier and then the loop end.
9740 && ! (GET_CODE (p) == JUMP_INSN
9741 && JUMP_LABEL (p) == loop->top
9742 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9743 && any_uncondjump_p (p)))
9745 /* If this is a jump outside of the loop but not right
9746 after the end of the loop, we would have to emit new fixup
9747 sequences for each such label. */
9748 if (/* If we can't tell where control might go when this
9749 JUMP_INSN is executed, we must be conservative. */
9750 !JUMP_LABEL (p)
9751 || (JUMP_LABEL (p) != end_label
9752 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9753 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9754 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9755 return;
9757 if (!any_condjump_p (p))
9758 /* Something complicated. */
9759 maybe_never = 1;
9760 else
9761 /* If there are any more instructions in the loop, they
9762 might not be reached. */
9763 next_maybe_never = 1;
9765 else if (next_maybe_never)
9766 maybe_never = 1;
9769 /* Find start of the extended basic block that enters the loop. */
9770 for (p = loop->start;
9771 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9772 p = PREV_INSN (p))
9774 prev_ebb_head = p;
9776 cselib_init ();
9778 /* Build table of mems that get set to constant values before the
9779 loop. */
9780 for (; p != loop->start; p = NEXT_INSN (p))
9781 cselib_process_insn (p);
9783 /* Actually move the MEMs. */
9784 for (i = 0; i < loop_info->mems_idx; ++i)
9786 regset_head load_copies;
9787 regset_head store_copies;
9788 int written = 0;
9789 rtx reg;
9790 rtx mem = loop_info->mems[i].mem;
9791 rtx mem_list_entry;
9793 if (MEM_VOLATILE_P (mem)
9794 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9795 /* There's no telling whether or not MEM is modified. */
9796 loop_info->mems[i].optimize = 0;
9798 /* Go through the MEMs written to in the loop to see if this
9799 one is aliased by one of them. */
9800 mem_list_entry = loop_info->store_mems;
9801 while (mem_list_entry)
9803 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9804 written = 1;
9805 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9806 mem, rtx_varies_p))
9808 /* MEM is indeed aliased by this store. */
9809 loop_info->mems[i].optimize = 0;
9810 break;
9812 mem_list_entry = XEXP (mem_list_entry, 1);
9815 if (flag_float_store && written
9816 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9817 loop_info->mems[i].optimize = 0;
9819 /* If this MEM is written to, we must be sure that there
9820 are no reads from another MEM that aliases this one. */
9821 if (loop_info->mems[i].optimize && written)
9823 int j;
9825 for (j = 0; j < loop_info->mems_idx; ++j)
9827 if (j == i)
9828 continue;
9829 else if (true_dependence (mem,
9830 VOIDmode,
9831 loop_info->mems[j].mem,
9832 rtx_varies_p))
9834 /* It's not safe to hoist loop_info->mems[i] out of
9835 the loop because writes to it might not be
9836 seen by reads from loop_info->mems[j]. */
9837 loop_info->mems[i].optimize = 0;
9838 break;
9843 if (maybe_never && may_trap_p (mem))
9844 /* We can't access the MEM outside the loop; it might
9845 cause a trap that wouldn't have happened otherwise. */
9846 loop_info->mems[i].optimize = 0;
9848 if (!loop_info->mems[i].optimize)
9849 /* We thought we were going to lift this MEM out of the
9850 loop, but later discovered that we could not. */
9851 continue;
9853 INIT_REG_SET (&load_copies);
9854 INIT_REG_SET (&store_copies);
9856 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9857 order to keep scan_loop from moving stores to this MEM
9858 out of the loop just because this REG is neither a
9859 user-variable nor used in the loop test. */
9860 reg = gen_reg_rtx (GET_MODE (mem));
9861 REG_USERVAR_P (reg) = 1;
9862 loop_info->mems[i].reg = reg;
9864 /* Now, replace all references to the MEM with the
9865 corresponding pseudos. */
9866 maybe_never = 0;
9867 for (p = next_insn_in_loop (loop, loop->scan_start);
9868 p != NULL_RTX;
9869 p = next_insn_in_loop (loop, p))
9871 if (INSN_P (p))
9873 rtx set;
9875 set = single_set (p);
9877 /* See if this copies the mem into a register that isn't
9878 modified afterwards. We'll try to do copy propagation
9879 a little further on. */
9880 if (set
9881 /* @@@ This test is _way_ too conservative. */
9882 && ! maybe_never
9883 && GET_CODE (SET_DEST (set)) == REG
9884 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9885 && REGNO (SET_DEST (set)) < last_max_reg
9886 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9887 && rtx_equal_p (SET_SRC (set), mem))
9888 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9890 /* See if this copies the mem from a register that isn't
9891 modified afterwards. We'll try to remove the
9892 redundant copy later on by doing a little register
9893 renaming and copy propagation. This will help
9894 to untangle things for the BIV detection code. */
9895 if (set
9896 && ! maybe_never
9897 && GET_CODE (SET_SRC (set)) == REG
9898 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9899 && REGNO (SET_SRC (set)) < last_max_reg
9900 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9901 && rtx_equal_p (SET_DEST (set), mem))
9902 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9904 /* If this is a call which uses / clobbers this memory
9905 location, we must not change the interface here. */
9906 if (GET_CODE (p) == CALL_INSN
9907 && reg_mentioned_p (loop_info->mems[i].mem,
9908 CALL_INSN_FUNCTION_USAGE (p)))
9910 cancel_changes (0);
9911 loop_info->mems[i].optimize = 0;
9912 break;
9914 else
9915 /* Replace the memory reference with the shadow register. */
9916 replace_loop_mems (p, loop_info->mems[i].mem,
9917 loop_info->mems[i].reg);
9920 if (GET_CODE (p) == CODE_LABEL
9921 || GET_CODE (p) == JUMP_INSN)
9922 maybe_never = 1;
9925 if (! loop_info->mems[i].optimize)
9926 ; /* We found we couldn't do the replacement, so do nothing. */
9927 else if (! apply_change_group ())
9928 /* We couldn't replace all occurrences of the MEM. */
9929 loop_info->mems[i].optimize = 0;
9930 else
9932 /* Load the memory immediately before LOOP->START, which is
9933 the NOTE_LOOP_BEG. */
9934 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9935 rtx set;
9936 rtx best = mem;
9937 int j;
9938 struct elt_loc_list *const_equiv = 0;
9940 if (e)
9942 struct elt_loc_list *equiv;
9943 struct elt_loc_list *best_equiv = 0;
9944 for (equiv = e->locs; equiv; equiv = equiv->next)
9946 if (CONSTANT_P (equiv->loc))
9947 const_equiv = equiv;
9948 else if (GET_CODE (equiv->loc) == REG
9949 /* Extending hard register lifetimes causes a crash
9950 on SRC targets. Doing so on non-SRC targets is
9951 probably also not a good idea, since we most
9952 likely have a pseudo-register equivalence as
9953 well. */
9954 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9955 best_equiv = equiv;
9957 /* Use the constant equivalence if that is cheap enough. */
9958 if (! best_equiv)
9959 best_equiv = const_equiv;
9960 else if (const_equiv
9961 && (rtx_cost (const_equiv->loc, SET)
9962 <= rtx_cost (best_equiv->loc, SET)))
9964 best_equiv = const_equiv;
9965 const_equiv = 0;
9968 /* If best_equiv is nonzero, we know that MEM is set to a
9969 constant or register before the loop. We will use this
9970 knowledge to initialize the shadow register with that
9971 constant or reg rather than by loading from MEM. */
9972 if (best_equiv)
9973 best = copy_rtx (best_equiv->loc);
9976 set = gen_move_insn (reg, best);
9977 set = loop_insn_hoist (loop, set);
9978 if (REG_P (best))
9980 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9981 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9983 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9984 break;
9988 if (const_equiv)
9989 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9991 if (written)
9993 if (label == NULL_RTX)
9995 label = gen_label_rtx ();
9996 emit_label_after (label, loop->end);
9999 /* Store the memory immediately after END, which is
10000 the NOTE_LOOP_END. */
10001 set = gen_move_insn (copy_rtx (mem), reg);
10002 loop_insn_emit_after (loop, 0, label, set);
10005 if (loop_dump_stream)
10007 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10008 REGNO (reg), (written ? "r/w" : "r/o"));
10009 print_rtl (loop_dump_stream, mem);
10010 fputc ('\n', loop_dump_stream);
10013 /* Attempt a bit of copy propagation. This helps untangle the
10014 data flow, and enables {basic,general}_induction_var to find
10015 more bivs/givs. */
10016 EXECUTE_IF_SET_IN_REG_SET
10017 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10019 try_copy_prop (loop, reg, j);
10021 CLEAR_REG_SET (&load_copies);
10023 EXECUTE_IF_SET_IN_REG_SET
10024 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10026 try_swap_copy_prop (loop, reg, j);
10028 CLEAR_REG_SET (&store_copies);
10032 if (label != NULL_RTX && end_label != NULL_RTX)
10034 /* Now, we need to replace all references to the previous exit
10035 label with the new one. */
10036 rtx_pair rr;
10037 rr.r1 = end_label;
10038 rr.r2 = label;
10040 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10042 for_each_rtx (&p, replace_label, &rr);
10044 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10045 field. This is not handled by for_each_rtx because it doesn't
10046 handle unprinted ('0') fields. We need to update JUMP_LABEL
10047 because the immediately following unroll pass will use it.
10048 replace_label would not work anyway, because it only handles
10049 LABEL_REFs. */
10050 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10051 JUMP_LABEL (p) = label;
10055 cselib_finish ();
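/* Illustrative sketch, not part of loop.c: at the source level,
   load_mems amounts to the rewrite below, shown on hypothetical C
   code rather than RTL.  A loop-invariant memory location gets a
   shadow pseudo, loaded once in the pre-header and, when the
   location is written in the loop, stored back once after it.  */

/* Before: every iteration loads and stores *P directly.  */
void
accum_before (int *p, const int *a, int n)
{
  int i;
  for (i = 0; i < n; i++)
    *p = *p + a[i];
}

/* After: *P is shadowed by S.  This is valid only under the checks
   made above: *P is not volatile, not aliased by A[], and the access
   cannot trap on an iteration that would not have executed it.  */
void
accum_after (int *p, const int *a, int n)
{
  int i;
  int s = *p;                   /* hoisted load (loop_insn_hoist)  */
  for (i = 0; i < n; i++)
    s = s + a[i];
  *p = s;                       /* store sunk past the loop end  */
}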
10058 /* For communication between note_reg_stored and its caller. */
10059 struct note_reg_stored_arg
10061 int set_seen;
10062 rtx reg;
10065 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10066 is equal to ARG. */
10067 static void
10068 note_reg_stored (x, setter, arg)
10069 rtx x, setter ATTRIBUTE_UNUSED;
10070 void *arg;
10072 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10073 if (t->reg == x)
10074 t->set_seen = 1;
10077 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10078 There must be exactly one insn that sets this pseudo; it will be
10079 deleted if all replacements succeed and we can prove that the register
10080 is not used after the loop. */
10082 static void
10083 try_copy_prop (loop, replacement, regno)
10084 const struct loop *loop;
10085 rtx replacement;
10086 unsigned int regno;
10088 /* This is the reg that we are copying from. */
10089 rtx reg_rtx = regno_reg_rtx[regno];
10090 rtx init_insn = 0;
10091 rtx insn;
10092 /* These help keep track of whether we replaced all uses of the reg. */
10093 int replaced_last = 0;
10094 int store_is_first = 0;
10096 for (insn = next_insn_in_loop (loop, loop->scan_start);
10097 insn != NULL_RTX;
10098 insn = next_insn_in_loop (loop, insn))
10100 rtx set;
10102 /* Only substitute within one extended basic block from the initializing
10103 insn. */
10104 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10105 break;
10107 if (! INSN_P (insn))
10108 continue;
10110 /* Is this the initializing insn? */
10111 set = single_set (insn);
10112 if (set
10113 && GET_CODE (SET_DEST (set)) == REG
10114 && REGNO (SET_DEST (set)) == regno)
10116 if (init_insn)
10117 abort ();
10119 init_insn = insn;
10120 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10121 store_is_first = 1;
10124 /* Only substitute after seeing the initializing insn. */
10125 if (init_insn && insn != init_insn)
10127 struct note_reg_stored_arg arg;
10129 replace_loop_regs (insn, reg_rtx, replacement);
10130 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10131 replaced_last = 1;
10133 /* Stop replacing when REPLACEMENT is modified. */
10134 arg.reg = replacement;
10135 arg.set_seen = 0;
10136 note_stores (PATTERN (insn), note_reg_stored, &arg);
10137 if (arg.set_seen)
10139 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10141 /* It is possible that we've turned a previously valid REG_EQUAL note
10142 into an invalid one: we changed REGNO to REPLACEMENT and, unlike
10143 REGNO, REPLACEMENT is modified here, so the note's meaning changes. */
10144 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10145 remove_note (insn, note);
10146 break;
10150 if (! init_insn)
10151 abort ();
10152 if (apply_change_group ())
10154 if (loop_dump_stream)
10155 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10156 if (store_is_first && replaced_last)
10158 rtx first;
10159 rtx retval_note;
10161 /* Assume we're just deleting INIT_INSN. */
10162 first = init_insn;
10163 /* Look for REG_RETVAL note. If we're deleting the end of
10164 the libcall sequence, the whole sequence can go. */
10165 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10166 /* If we found a REG_RETVAL note, find the first instruction
10167 in the sequence. */
10168 if (retval_note)
10169 first = XEXP (retval_note, 0);
10171 /* Delete the instructions. */
10172 loop_delete_insns (first, init_insn);
10174 if (loop_dump_stream)
10175 fprintf (loop_dump_stream, ".\n");
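/* Illustrative sketch, not part of loop.c: in source terms,
   try_copy_prop performs the classic copy propagation below
   (hypothetical C, not RTL).  Once T is initialized from R and
   neither register changes afterwards within the extended basic
   block, every use of T can become a use of R, leaving the
   initializing copy dead for loop_delete_insns.  */

/* Before: T is a register-to-register copy of R.  */
int
dot_before (int r, const int *a, int n)
{
  int t = r;                    /* init_insn: the only set of T  */
  int i, acc = 0;
  for (i = 0; i < n; i++)
    acc += a[i] * t;            /* uses of T ...  */
  return acc;
}

/* After: the uses refer to R directly and the copy is gone.  */
int
dot_after (int r, const int *a, int n)
{
  int i, acc = 0;
  for (i = 0; i < n; i++)
    acc += a[i] * r;
  return acc;
}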
10179 /* Replace all the instructions from FIRST up to and including LAST
10180 with NOTE_INSN_DELETED notes. */
10182 static void
10183 loop_delete_insns (first, last)
10184 rtx first;
10185 rtx last;
10187 while (1)
10189 if (loop_dump_stream)
10190 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10191 INSN_UID (first));
10192 delete_insn (first);
10194 /* If this was the LAST instruction we're supposed to delete,
10195 we're done. */
10196 if (first == last)
10197 break;
10199 first = NEXT_INSN (first);
10203 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10204 loop LOOP if the order of the sets of these registers can be
10205 swapped. There must be exactly one insn within the loop that sets
10206 this pseudo, followed immediately by a move insn that copies
10207 it to REPLACEMENT. */
10208 static void
10209 try_swap_copy_prop (loop, replacement, regno)
10210 const struct loop *loop;
10211 rtx replacement;
10212 unsigned int regno;
10214 rtx insn;
10215 rtx set = NULL_RTX;
10216 unsigned int new_regno;
10218 new_regno = REGNO (replacement);
10220 for (insn = next_insn_in_loop (loop, loop->scan_start);
10221 insn != NULL_RTX;
10222 insn = next_insn_in_loop (loop, insn))
10224 /* Search for the insn that copies REGNO to NEW_REGNO. */
10225 if (INSN_P (insn)
10226 && (set = single_set (insn))
10227 && GET_CODE (SET_DEST (set)) == REG
10228 && REGNO (SET_DEST (set)) == new_regno
10229 && GET_CODE (SET_SRC (set)) == REG
10230 && REGNO (SET_SRC (set)) == regno)
10231 break;
10234 if (insn != NULL_RTX)
10236 rtx prev_insn;
10237 rtx prev_set;
10239 /* Some DEF-USE info would come in handy here to make this
10240 function more general. For now, just check the previous insn
10241 which is the most likely candidate for setting REGNO. */
10243 prev_insn = PREV_INSN (insn);
10245 if (INSN_P (prev_insn)
10246 && (prev_set = single_set (prev_insn))
10247 && GET_CODE (SET_DEST (prev_set)) == REG
10248 && REGNO (SET_DEST (prev_set)) == regno)
10250 /* We have:
10251 (set (reg regno) (expr))
10252 (set (reg new_regno) (reg regno))
10254 so try converting this to:
10255 (set (reg new_regno) (expr))
10256 (set (reg regno) (reg new_regno))
10258 The former construct is often generated when a global
10259 variable used for an induction variable is shadowed by a
10260 register (NEW_REGNO). The latter construct improves the
10261 chances of GIV replacement and BIV elimination. */
10263 validate_change (prev_insn, &SET_DEST (prev_set),
10264 replacement, 1);
10265 validate_change (insn, &SET_DEST (set),
10266 SET_SRC (set), 1);
10267 validate_change (insn, &SET_SRC (set),
10268 replacement, 1);
10270 if (apply_change_group ())
10272 if (loop_dump_stream)
10273 fprintf (loop_dump_stream,
10274 " Swapped set of reg %d at %d with reg %d at %d.\n",
10275 regno, INSN_UID (insn),
10276 new_regno, INSN_UID (prev_insn));
10278 /* Update first use of REGNO. */
10279 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10280 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10282 /* Now perform copy propagation to hopefully
10283 remove all uses of REGNO within the loop. */
10284 try_copy_prop (loop, replacement, regno);
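/* Illustrative sketch, not part of loop.c: the RTL pattern in the
   comment above has this C-level shape (hypothetical code).  G is a
   global shadowed by a register; after the swap the loop-local
   register, not the memory-shadowing global, carries the induction
   expression, so the try_copy_prop call above can then eliminate the
   remaining uses of G inside the loop.  */

int g;                          /* induction variable kept in memory  */

/* Before the swap: G is set first, then copied into *R.  */
void
induction_before (int *r, int expr)
{
  g = expr;
  *r = g;
}

/* After the swap: *R is set first and G copied from it; the stored
   values are identical, only the order of the sets has changed.  */
void
induction_after (int *r, int expr)
{
  *r = expr;
  g = *r;
}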
10290 /* Replace MEM with its associated pseudo register. This function is
10291 called from load_mems via for_each_rtx. DATA is actually a pointer
10292 to a structure describing the instruction currently being scanned
10293 and the MEM we are currently replacing. */
10295 static int
10296 replace_loop_mem (mem, data)
10297 rtx *mem;
10298 void *data;
10300 loop_replace_args *args = (loop_replace_args *) data;
10301 rtx m = *mem;
10303 if (m == NULL_RTX)
10304 return 0;
10306 switch (GET_CODE (m))
10308 case MEM:
10309 break;
10311 case CONST_DOUBLE:
10312 /* We're not interested in the MEM associated with a
10313 CONST_DOUBLE, so there's no need to traverse into one. */
10314 return -1;
10316 default:
10317 /* This is not a MEM. */
10318 return 0;
10321 if (!rtx_equal_p (args->match, m))
10322 /* This is not the MEM we are currently replacing. */
10323 return 0;
10325 /* Actually replace the MEM. */
10326 validate_change (args->insn, mem, args->replacement, 1);
10328 return 0;
10331 static void
10332 replace_loop_mems (insn, mem, reg)
10333 rtx insn;
10334 rtx mem;
10335 rtx reg;
10337 loop_replace_args args;
10339 args.insn = insn;
10340 args.match = mem;
10341 args.replacement = reg;
10343 for_each_rtx (&insn, replace_loop_mem, &args);
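/* Illustrative sketch, not part of loop.c: replace_loop_mems, like
   insert_loop_mem earlier, drives its callback through for_each_rtx,
   whose return protocol is: 0 to keep walking, -1 to skip the
   current node's operands, any other nonzero value to abort the
   whole walk.  The tree type and walker below are a hypothetical
   standalone model of that protocol.  */

#include <stddef.h>

struct node { int code; struct node *kids[2]; };

/* Depth-first walk calling F on every sub-node, honoring the
   for_each_rtx-style return values described above.  */
static int
for_each_node (struct node **nodep, int (*f) (struct node **, void *),
               void *data)
{
  int i, ret;

  if (*nodep == NULL)
    return 0;
  ret = f (nodep, data);
  if (ret == -1)
    return 0;                   /* skip children, continue elsewhere  */
  if (ret != 0)
    return ret;                 /* abort the entire traversal  */
  for (i = 0; i < 2; i++)
    if ((ret = for_each_node (&(*nodep)->kids[i], f, data)) != 0)
      return ret;
  return 0;
}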
10346 /* Replace one register with another. Called through for_each_rtx; PX points
10347 to the rtx being scanned. DATA is actually a pointer to
10348 a structure of arguments. */
10350 static int
10351 replace_loop_reg (px, data)
10352 rtx *px;
10353 void *data;
10355 rtx x = *px;
10356 loop_replace_args *args = (loop_replace_args *) data;
10358 if (x == NULL_RTX)
10359 return 0;
10361 if (x == args->match)
10362 validate_change (args->insn, px, args->replacement, 1);
10364 return 0;
10367 static void
10368 replace_loop_regs (insn, reg, replacement)
10369 rtx insn;
10370 rtx reg;
10371 rtx replacement;
10373 loop_replace_args args;
10375 args.insn = insn;
10376 args.match = reg;
10377 args.replacement = replacement;
10379 for_each_rtx (&insn, replace_loop_reg, &args);
10382 /* Replace occurrences of the old exit label for the loop with the new
10383 one. DATA is an rtx_pair containing the old and new labels,
10384 respectively. */
10386 static int
10387 replace_label (x, data)
10388 rtx *x;
10389 void *data;
10391 rtx l = *x;
10392 rtx old_label = ((rtx_pair *) data)->r1;
10393 rtx new_label = ((rtx_pair *) data)->r2;
10395 if (l == NULL_RTX)
10396 return 0;
10398 if (GET_CODE (l) != LABEL_REF)
10399 return 0;
10401 if (XEXP (l, 0) != old_label)
10402 return 0;
10404 XEXP (l, 0) = new_label;
10405 ++LABEL_NUSES (new_label);
10406 --LABEL_NUSES (old_label);
10408 return 0;
10411 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10412 (ignored in the interim). */
10414 static rtx
10415 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10416 const struct loop *loop ATTRIBUTE_UNUSED;
10417 basic_block where_bb ATTRIBUTE_UNUSED;
10418 rtx where_insn;
10419 rtx pattern;
10421 return emit_insn_after (pattern, where_insn);
10425 /* If WHERE_INSN is non-zero, emit an insn for PATTERN before WHERE_INSN
10426 in basic block WHERE_BB (ignored in the interim) within the loop;
10427 otherwise, hoist PATTERN into the loop pre-header. */
10429 rtx
10430 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10431 const struct loop *loop;
10432 basic_block where_bb ATTRIBUTE_UNUSED;
10433 rtx where_insn;
10434 rtx pattern;
10436 if (! where_insn)
10437 return loop_insn_hoist (loop, pattern);
10438 return emit_insn_before (pattern, where_insn);
10442 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10443 WHERE_BB (ignored in the interim) within the loop. */
10445 static rtx
10446 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10447 const struct loop *loop ATTRIBUTE_UNUSED;
10448 basic_block where_bb ATTRIBUTE_UNUSED;
10449 rtx where_insn;
10450 rtx pattern;
10452 return emit_call_insn_before (pattern, where_insn);
10456 /* Hoist insn for PATTERN into the loop pre-header. */
10458 rtx
10459 loop_insn_hoist (loop, pattern)
10460 const struct loop *loop;
10461 rtx pattern;
10463 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10467 /* Hoist call insn for PATTERN into the loop pre-header. */
10469 static rtx
10470 loop_call_insn_hoist (loop, pattern)
10471 const struct loop *loop;
10472 rtx pattern;
10474 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10478 /* Sink insn for PATTERN after the loop end. */
10480 rtx
10481 loop_insn_sink (loop, pattern)
10482 const struct loop *loop;
10483 rtx pattern;
10485 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10488 /* bl->final_value can be either a general_operand or a PLUS of a
10489 general_operand and a constant. Emit a sequence of insns to load it into REG. */
10490 static rtx
10491 gen_load_of_final_value (reg, final_value)
10492 rtx reg, final_value;
10494 rtx seq;
10495 start_sequence ();
10496 final_value = force_operand (final_value, reg);
10497 if (final_value != reg)
10498 emit_move_insn (reg, final_value);
10499 seq = get_insns ();
10500 end_sequence ();
10501 return seq;
10504 /* If the loop has multiple exits, emit insn for PATTERN before the
10505 loop to ensure that it will always be executed no matter how the
10506 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10507 since this is slightly more efficient. */
10509 static rtx
10510 loop_insn_sink_or_swim (loop, pattern)
10511 const struct loop *loop;
10512 rtx pattern;
10514 if (loop->exit_count)
10515 return loop_insn_hoist (loop, pattern);
10516 else
10517 return loop_insn_sink (loop, pattern);
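/* Illustrative sketch, not part of loop.c: the choice above in toy
   form.  Code that must run whenever the loop finishes can sit after
   the loop only if the fall-through edge is the sole exit; with any
   recorded alternate exits, only the pre-header is executed on every
   path, so the insn must be hoisted.  Names are hypothetical.  */

enum placement { PLACE_BEFORE_LOOP, PLACE_AFTER_LOOP };

/* Mirror loop_insn_sink_or_swim's decision.  */
static enum placement
choose_placement (int exit_count)
{
  return exit_count ? PLACE_BEFORE_LOOP : PLACE_AFTER_LOOP;
}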
10520 static void
10521 loop_ivs_dump (loop, file, verbose)
10522 const struct loop *loop;
10523 FILE *file;
10524 int verbose;
10526 struct iv_class *bl;
10527 int iv_num = 0;
10529 if (! loop || ! file)
10530 return;
10532 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10533 iv_num++;
10535 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10537 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10539 loop_iv_class_dump (bl, file, verbose);
10540 fputc ('\n', file);
10545 static void
10546 loop_iv_class_dump (bl, file, verbose)
10547 const struct iv_class *bl;
10548 FILE *file;
10549 int verbose ATTRIBUTE_UNUSED;
10551 struct induction *v;
10552 rtx incr;
10553 int i;
10555 if (! bl || ! file)
10556 return;
10558 fprintf (file, "IV class for reg %d, benefit %d\n",
10559 bl->regno, bl->total_benefit);
10561 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10562 if (bl->initial_value)
10564 fprintf (file, ", init val: ");
10565 print_simple_rtl (file, bl->initial_value);
10567 if (bl->initial_test)
10569 fprintf (file, ", init test: ");
10570 print_simple_rtl (file, bl->initial_test);
10572 fputc ('\n', file);
10574 if (bl->final_value)
10576 fprintf (file, " Final val: ");
10577 print_simple_rtl (file, bl->final_value);
10578 fputc ('\n', file);
10581 if ((incr = biv_total_increment (bl)))
10583 fprintf (file, " Total increment: ");
10584 print_simple_rtl (file, incr);
10585 fputc ('\n', file);
10588 /* List the increments. */
10589 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10591 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10592 print_simple_rtl (file, v->add_val);
10593 fputc ('\n', file);
10596 /* List the givs. */
10597 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10599 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10600 i, INSN_UID (v->insn), v->benefit);
10601 if (v->giv_type == DEST_ADDR)
10602 print_simple_rtl (file, v->mem);
10603 else
10604 print_simple_rtl (file, single_set (v->insn));
10605 fputc ('\n', file);
10610 static void
10611 loop_biv_dump (v, file, verbose)
10612 const struct induction *v;
10613 FILE *file;
10614 int verbose;
10616 if (! v || ! file)
10617 return;
10619 fprintf (file,
10620 "Biv %d: insn %d",
10621 REGNO (v->dest_reg), INSN_UID (v->insn));
10622 fprintf (file, " const ");
10623 print_simple_rtl (file, v->add_val);
10625 if (verbose && v->final_value)
10627 fputc ('\n', file);
10628 fprintf (file, " final ");
10629 print_simple_rtl (file, v->final_value);
10632 fputc ('\n', file);
10636 static void
10637 loop_giv_dump (v, file, verbose)
10638 const struct induction *v;
10639 FILE *file;
10640 int verbose;
10642 if (! v || ! file)
10643 return;
10645 if (v->giv_type == DEST_REG)
10646 fprintf (file, "Giv %d: insn %d",
10647 REGNO (v->dest_reg), INSN_UID (v->insn));
10648 else
10649 fprintf (file, "Dest address: insn %d",
10650 INSN_UID (v->insn));
10652 fprintf (file, " src reg %d benefit %d",
10653 REGNO (v->src_reg), v->benefit);
10654 fprintf (file, " lifetime %d",
10655 v->lifetime);
10657 if (v->replaceable)
10658 fprintf (file, " replaceable");
10660 if (v->no_const_addval)
10661 fprintf (file, " ncav");
10663 if (v->ext_dependent)
10665 switch (GET_CODE (v->ext_dependent))
10667 case SIGN_EXTEND:
10668 fprintf (file, " ext se");
10669 break;
10670 case ZERO_EXTEND:
10671 fprintf (file, " ext ze");
10672 break;
10673 case TRUNCATE:
10674 fprintf (file, " ext tr");
10675 break;
10676 default:
10677 abort ();
10681 fputc ('\n', file);
10682 fprintf (file, " mult ");
10683 print_simple_rtl (file, v->mult_val);
10685 fputc ('\n', file);
10686 fprintf (file, " add ");
10687 print_simple_rtl (file, v->add_val);
10689 if (verbose && v->final_value)
10691 fputc ('\n', file);
10692 fprintf (file, " final ");
10693 print_simple_rtl (file, v->final_value);
10696 fputc ('\n', file);
10700 void
10701 debug_ivs (loop)
10702 const struct loop *loop;
10704 loop_ivs_dump (loop, stderr, 1);
10708 void
10709 debug_iv_class (bl)
10710 const struct iv_class *bl;
10712 loop_iv_class_dump (bl, stderr, 1);
10716 void
10717 debug_biv (v)
10718 const struct induction *v;
10720 loop_biv_dump (v, stderr, 1);
10724 void
10725 debug_giv (v)
10726 const struct induction *v;
10728 loop_giv_dump (v, stderr, 1);
10732 #define LOOP_BLOCK_NUM_1(INSN) \
10733 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10735 /* The notes do not have an assigned block, so look at the next insn. */
10736 #define LOOP_BLOCK_NUM(INSN) \
10737 ((INSN) ? (GET_CODE (INSN) == NOTE \
10738 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10739 : LOOP_BLOCK_NUM_1 (INSN)) \
10740 : -1)
10742 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10744 static void
10745 loop_dump_aux (loop, file, verbose)
10746 const struct loop *loop;
10747 FILE *file;
10748 int verbose ATTRIBUTE_UNUSED;
10750 rtx label;
10752 if (! loop || ! file)
10753 return;
10755 /* Print diagnostics to compare our concept of a loop with
10756 what the loop notes say. */
10757 if (! PREV_INSN (loop->first->head)
10758 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10759 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10760 != NOTE_INSN_LOOP_BEG)
10761 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10762 INSN_UID (PREV_INSN (loop->first->head)));
10763 if (! NEXT_INSN (loop->last->end)
10764 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10765 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10766 != NOTE_INSN_LOOP_END)
10767 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10768 INSN_UID (NEXT_INSN (loop->last->end)));
10770 if (loop->start)
10772 fprintf (file,
10773 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10774 LOOP_BLOCK_NUM (loop->start),
10775 LOOP_INSN_UID (loop->start),
10776 LOOP_BLOCK_NUM (loop->cont),
10777 LOOP_INSN_UID (loop->cont),
10778 LOOP_BLOCK_NUM (loop->cont),
10779 LOOP_INSN_UID (loop->cont),
10780 LOOP_BLOCK_NUM (loop->vtop),
10781 LOOP_INSN_UID (loop->vtop),
10782 LOOP_BLOCK_NUM (loop->end),
10783 LOOP_INSN_UID (loop->end));
10784 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10785 LOOP_BLOCK_NUM (loop->top),
10786 LOOP_INSN_UID (loop->top),
10787 LOOP_BLOCK_NUM (loop->scan_start),
10788 LOOP_INSN_UID (loop->scan_start));
10789 fprintf (file, ";; exit_count %d", loop->exit_count);
10790 if (loop->exit_count)
10792 fputs (", labels:", file);
10793 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10795 fprintf (file, " %d ",
10796 LOOP_INSN_UID (XEXP (label, 0)));
10799 fputs ("\n", file);
10801 /* This can happen when a marked loop appears as two nested loops,
10802 say from while (a || b) {}. The inner loop won't match
10803 the loop markers but the outer one will. */
10804 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10805 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10809 /* Call this function from the debugger to dump LOOP. */
10811 void
10812 debug_loop (loop)
10813 const struct loop *loop;
10815 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10818 /* Call this function from the debugger to dump LOOPS. */
10820 void
10821 debug_loops (loops)
10822 const struct loops *loops;
10824 flow_loops_dump (loops, stderr, loop_dump_aux, 1);