/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
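
/* For instance (a source-level illustration only; the pass itself
   works on RTL):

       for (i = 0; i < n; i++)
         a[i * 4] = x * y;

   becomes, in effect,

       t = x * y;
       for (i = 0, d = 0; i < n; i++, d += 4)
         a[d] = t;

   where X * Y has been hoisted as a loop invariant and the general
   induction variable I * 4 has been strength-reduced to the running
   sum D.  */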
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
/* If the target has no prefetch pattern, stub these out so the code
   below still compiles; gen_prefetch must never actually be reached.  */
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif
/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum fraction of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
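
/* The convenience macros below summarize per-register information:
   LOOP_REG_LIFETIME is the distance in luids between the first and last
   use of pseudo REGNO; LOOP_REG_GLOBAL_P is nonzero when REGNO is also
   used outside the insns of LOOP; LOOP_REGNO_NREGS is the number of
   hard registers a SET of REGNO occupies (1 for a pseudo).  */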
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)


/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                   /* A movable insn */
  rtx set_src;                /* The expression this reg is set from.  */
  rtx set_dest;               /* The destination of this SET.  */
  rtx dependencies;           /* When INSN is libcall, this is an EXPR_LIST
                                 of any registers used within the LIBCALL.  */
  int consec;                 /* Number of consecutive following insns
                                 that must be moved with this one.  */
  unsigned int regno;         /* The register it sets */
  short lifetime;             /* lifetime of that register;
                                 may be adjusted when matching movables
                                 that load the same value are found.  */
  short savings;              /* Number of insns we can move for this reg,
                                 including other movables that force this
                                 or match this one.  */
  unsigned int cond : 1;      /* 1 if only conditionally movable */
  unsigned int force : 1;     /* 1 means MUST move this insn */
  unsigned int global : 1;    /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;      /* 1 inhibits further processing of this */

  unsigned int partial : 1;   /* 1 means this reg is used for zero-extending.
                                 In particular, moving it does not make it
                                 invariant.  */
  unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
                                 load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;  /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode; /* Nonzero means it is a mode for a low part
                                 that we should avoid changing when clearing
                                 the rest of the reg.  */
  struct movable *match;      /* First entry for same value */
  struct movable *forces;     /* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;
/* Forward declarations.  */

static void invalidate_loops_containing_label PARAMS ((rtx));
static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
                                            rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
                                      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
                                         struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
                                   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
                                      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
                                         int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
                                           struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
                                     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
                                rtx, rtx, rtx, rtx, rtx *,
                                int, int));
static void check_final_value PARAMS ((const struct loop *,
                                       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
                                rtx, rtx, rtx, rtx, rtx, rtx, int,
                                enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependent_givs PARAMS ((struct iv_class *,
                                              struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
                                        enum machine_mode, rtx, rtx,
                                        rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
                                          rtx *, rtx *, rtx *, int, int *,
                                          enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
                                    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
                                        int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
                                          struct iv_class *, int,
                                          basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
                                        unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
                                        rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
                                              basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));

typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
                                                     struct induction *,
                                                     rtx));

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
                                       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();
  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);
  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.
     The first loop below seeds uid_luid[0] with the first nonzero
     luid, so the backfill loop that follows always has a valid
     predecessor entry to copy from.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
        scan_loop (loop, flags);
    }

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop->top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
  for (p = NEXT_INSN (loop_start);
       p != loop_end
         && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (any_uncondjump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
        {
          loop->top = next_label (loop->scan_start);
          loop->scan_start = JUMP_LABEL (p);
        }
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */
  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop->cont));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
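
  /* Note on the libcall bookkeeping below: a libcall block is bracketed
     by a REG_LIBCALL note on its first insn and a REG_RETVAL note on its
     last, and IN_LIBCALL counts how deep we are inside such blocks so
     that their member insns are not considered for moving individually.  */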
  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall--;
      if (GET_CODE (p) == INSN)
        {
          temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
          if (temp)
            in_libcall++;
          if (! in_libcall
              && (set = single_set (p))
              && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
              && SET_DEST (set) != pic_offset_table_rtx
#endif
              && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
            {
              int tem1 = 0;
              int tem2 = 0;
              int move_insn = 0;
              rtx src = SET_SRC (set);
              rtx dependencies = 0;

              /* Figure out what to use as a source of this insn.  If a
                 REG_EQUIV note is given or if a REG_EQUAL note with a
                 constant operand is specified, use it as the source and
                 mark that we should move this insn by calling
                 emit_move_insn rather than duplicating the insn.

                 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
                 note is present.  */
              temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
              if (temp)
                src = XEXP (temp, 0), move_insn = 1;
              else
                {
                  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                  if (temp && CONSTANT_P (XEXP (temp, 0)))
                    src = XEXP (temp, 0), move_insn = 1;
                  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                    {
                      src = XEXP (temp, 0);
                      /* A libcall block can use regs that don't appear in
                         the equivalent expression.  To move the libcall,
                         we must move those regs too.  */
                      dependencies = libcall_other_reg (p, src);
                    }
                }
              /* For parallels, add any possible uses to the dependencies, as
                 we can't move the insn without resolving them first.  */
              if (GET_CODE (PATTERN (p)) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
                    {
                      rtx x = XVECEXP (PATTERN (p), 0, i);
                      if (GET_CODE (x) == USE)
                        dependencies
                          = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
                                               dependencies);
                    }
                }
              /* Don't try to optimize a register that was made
                 by loop-optimization for an inner loop.
                 We don't know its life-span, so we can't compute
                 the benefit.  */
              if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
                ;
              else if (/* The register is used in basic blocks other
                          than the one where it is set (meaning that
                          something after this point in the loop might
                          depend on its value before the set).  */
                       ! reg_in_basic_block_p (p, SET_DEST (set))
                       /* And the set is not guaranteed to be executed once
                          the loop starts, or the value before the set is
                          needed before the set occurs...

                          ??? Note we have quadratic behavior here, mitigated
                          by the fact that the previous test will often fail
                          for large loops.  Rather than re-scanning the entire
                          loop each time for register usage, we should build
                          tables of the register usage and use them here
                          instead.  */
                       && (maybe_never
                           || loop_reg_used_before_p (loop, set, p)))
                /* It is unsafe to move the set.

                   This code used to consider it OK to move a set of a
                   variable which was not created by the user and not used
                   in an exit test.
                   That behavior is incorrect and was removed.  */
                ;
              else if ((tem = loop_invariant_p (loop, src))
                       && (dependencies == 0
                           || (tem2
                               = loop_invariant_p (loop, dependencies)) != 0)
                       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
                           || (tem1
                               = consec_sets_invariant_p
                               (loop, SET_DEST (set),
                                regs->array[REGNO (SET_DEST (set))].set_in_loop,
                                p)))
                       /* If the insn can cause a trap (such as divide by
                          zero), can't move it unless it's guaranteed to be
                          executed once loop is entered.  Even a function call
                          might prevent the trap insn from being reached
                          (since it might exit!)  */
                       && ! ((maybe_never || call_passed)
                             && may_trap_p (src)))
                {
                  struct movable *m;
                  int regno = REGNO (SET_DEST (set));

                  /* A potential lossage case is where two insns can be
                     combined as long as they are both in the loop, but we
                     move one of them outside the loop.  For large loops,
                     this can lose.  The most common case of this is the
                     address of a function being called.

                     Therefore, if this register is marked as being used
                     exactly once if we are in a loop with calls
                     (a "large loop"), see if we can replace the usage of
                     this register with the source of this SET.  If we can,
                     delete this insn.

                     Don't do this if P has a REG_RETVAL note or if we have
                     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

                  if (loop_info->has_call
                      && regs->array[regno].single_usage != 0
                      && regs->array[regno].single_usage != const0_rtx
                      && REGNO_FIRST_UID (regno) == INSN_UID (p)
                      && (REGNO_LAST_UID (regno)
                          == INSN_UID (regs->array[regno].single_usage))
                      && regs->array[regno].set_in_loop == 1
                      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
                      && ! side_effects_p (SET_SRC (set))
                      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                      && (! SMALL_REGISTER_CLASSES
                          || (! (GET_CODE (SET_SRC (set)) == REG
                                 && (REGNO (SET_SRC (set))
                                     < FIRST_PSEUDO_REGISTER))))
                      /* This test is not redundant; SET_SRC (set) might be
                         a call-clobbered register and the life of REGNO
                         might span a call.  */
                      && ! modified_between_p (SET_SRC (set), p,
                                               regs->array[regno].single_usage)
                      && no_labels_between_p (p,
                                              regs->array[regno].single_usage)
                      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                               regs->array[regno].single_usage))
                    {
                      /* Replace any usage in a REG_EQUAL note.  Must copy
                         the new source, so that we don't get rtx sharing
                         between the SET_SOURCE and REG_NOTES of insn p.  */
                      REG_NOTES (regs->array[regno].single_usage)
                        = (replace_rtx
                           (REG_NOTES (regs->array[regno].single_usage),
                            SET_DEST (set), copy_rtx (SET_SRC (set))));

                      delete_insn (p);
                      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
                           i++)
                        regs->array[regno+i].set_in_loop = 0;
                      continue;
                    }
                  m = (struct movable *) xmalloc (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_src = src;
                  m->dependencies = dependencies;
                  m->set_dest = SET_DEST (set);
                  m->force = 0;
                  m->consec
                    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
                  m->done = 0;
                  m->forces = 0;
                  m->partial = 0;
                  m->move_insn = move_insn;
                  m->move_insn_first = 0;
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                  m->savemode = VOIDmode;
                  m->regno = regno;
                  /* Set M->cond if either loop_invariant_p
                     or consec_sets_invariant_p returned 2
                     (only conditionally invariant).  */
                  m->cond = ((tem | tem1 | tem2) > 1);
                  m->global = LOOP_REG_GLOBAL_P (loop, regno);
                  m->match = 0;
                  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
                  m->savings = regs->array[regno].n_times_set;
                  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                    m->savings += libcall_benefit (p);
                  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
                    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  loop_movables_add (movables, m);

                  if (m->consec > 0)
                    {
                      /* It is possible for the first instruction to have a
                         REG_EQUAL note but a non-invariant SET_SRC, so we
                         must remember the status of the first instruction in
                         case the last instruction doesn't have a REG_EQUAL
                         note.  */
                      m->move_insn_first = m->move_insn;

                      /* Skip this insn, not checking REG_LIBCALL notes.  */
                      p = next_nonnote_insn (p);
                      /* Skip the consecutive insns, if there are any.  */
                      p = skip_consec_insns (p, m->consec);
                      /* Back up to the last insn of the consecutive group.  */
                      p = prev_nonnote_insn (p);

                      /* We must now reset m->move_insn, m->is_equiv, and
                         possibly m->set_src to correspond to the effects of
                         all the insns.  */
                      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                      if (temp)
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        {
                          temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                          if (temp && CONSTANT_P (XEXP (temp, 0)))
                            m->set_src = XEXP (temp, 0), m->move_insn = 1;
                          else
                            m->move_insn = 0;
                        }
                      m->is_equiv
                        = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                    }
                }
              /* If this register is always set within a STRICT_LOW_PART
                 or set to zero, then its high bytes are constant.
                 So clear them outside the loop and within the loop
                 just load the low bytes.
                 We must check that the machine has an instruction to do so.
                 Also, if the value loaded into the register
                 depends on the same register, this cannot be done.  */
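              /* Roughly, the pattern being matched here is the pair
                 (the SI/HI modes are just an example):
                     (set (reg:SI r) (const_int 0))
                     (set (strict_low_part (subreg:HI (reg:SI r) 0)) ...)
                 where the second SET does not mention R in its source.  */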
              else if (SET_SRC (set) == const0_rtx
                       && GET_CODE (NEXT_INSN (p)) == INSN
                       && (set1 = single_set (NEXT_INSN (p)))
                       && GET_CODE (set1) == SET
                       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                           == SET_DEST (set))
                       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
                {
                  int regno = REGNO (SET_DEST (set));
                  if (regs->array[regno].set_in_loop == 2)
                    {
                      struct movable *m;
                      m = (struct movable *) xmalloc (sizeof (struct movable));
                      m->next = 0;
                      m->insn = p;
                      m->set_dest = SET_DEST (set);
                      m->dependencies = 0;
                      m->force = 0;
                      m->consec = 0;
                      m->done = 0;
                      m->forces = 0;
                      m->move_insn = 0;
                      m->move_insn_first = 0;
                      m->partial = 1;
                      /* If the insn may not be executed on some cycles,
                         we can't clear the whole reg; clear just high part.
                         Not even if the reg is used only within this loop.
                         Consider this:
                         while (1)
                           while (s != t) {
                             if (foo ()) x = *s;
                             use (x);
                           }
                         Clearing x before the inner loop could clobber a value
                         being saved from the last time around the outer loop.
                         However, if the reg is not used outside this loop
                         and all uses of the register are in the same
                         basic block as the store, there is no problem.

                         If this insn was made by loop, we don't know its
                         INSN_LUID and hence must make a conservative
                         assumption.  */
                      m->global = (INSN_UID (p) >= max_uid_for_loop
                                   || LOOP_REG_GLOBAL_P (loop, regno)
                                   || (labels_in_range_p
                                       (p, REGNO_FIRST_LUID (regno))));
                      if (maybe_never && m->global)
                        m->savemode = GET_MODE (SET_SRC (set1));
                      else
                        m->savemode = VOIDmode;
                      m->regno = regno;
                      m->cond = 0;
                      m->match = 0;
                      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
                      m->savings = 1;
                      for (i = 0;
                           i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
                           i++)
                        regs->array[regno+i].set_in_loop = -1;
                      /* Add M to the end of the chain MOVABLES.  */
                      loop_movables_add (movables, m);
                    }
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop_end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
                     && NEXT_INSN (NEXT_INSN (p)) == loop_end
                     && any_uncondjump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }
  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    {
      move_movables (loop, movables, threshold, insn_count);
      /* Recalculate regs->array if move_movables has created new
         registers.  */
      if (max_reg_num () > regs->num)
        {
          loop_regs_scan (loop, 0);
          for (update_start = loop_start;
               PREV_INSN (update_start)
                 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
               update_start = PREV_INSN (update_start))
            ;
          update_end = NEXT_INSN (loop_end);

          reg_scan_update (update_start, update_end, loop_max_reg);
          loop_max_reg = max_reg_num ();
        }
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
         && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
        /* Ensure our label doesn't go away.  */
        LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
          && --LABEL_NUSES (update_end) == 0)
        delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do
        insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}
/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
        if (m != 0)
          {
            m->forces = m1;
            m1->lifetime += m->lifetime;
            m1->savings += m->savings;
          }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Only pseudo registers are allowed to match or be matched,
     since move_movables does not validate the change.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
        && m->regno >= FIRST_PSEUDO_REGISTER
        && !m->partial)
      {
        struct movable *m1;
        int regno = m->regno;

        memset (matched_regs, 0, regs->num);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0
              && regs->array[m1->regno].n_times_set == 1
              && m1->regno >= FIRST_PSEUDO_REGISTER
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((GET_CODE (m1->set_src) == REG
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables, regs))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            struct movable *m1;

            int first = REGNO_FIRST_LUID (m->regno);
            int last = REGNO_LAST_LUID (m->regno);

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
                continue;
              }

            /* Make sure they extend to the same mode.
               (Almost always true.)  */
            if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
              continue;

            /* We already have one: check for overlap with those
               already combined together.  */
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1 == m0 || (m1->partial && m1->match == m0))
                if (! (REGNO_FIRST_LUID (m1->regno) > last
                       || REGNO_LAST_LUID (m1->regno) < first))
                  goto overlap;

            /* No overlap: we can combine this with the others.  */
            m0->lifetime += m->lifetime;
            m0->savings += m->savings;
            m->done = 1;
            m->match = m0;

          overlap:
            ;
          }
    }

  /* Clean up.  */
  free (matched_regs);
}
/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}
1620 /* Return 1 if X and Y are identical-looking rtx's.
1621 This is the Lisp function EQUAL for rtx arguments.
1623 If two registers are matching movables or a movable register and an
1624 equivalent constant, consider them equal. */
1626 static int
1627 rtx_equal_for_loop_p (x, y, movables, regs)
1628 rtx x, y;
1629 struct loop_movables *movables;
1630 struct loop_regs *regs;
1632 int i;
1633 int j;
1634 struct movable *m;
1635 enum rtx_code code;
1636 const char *fmt;
1638 if (x == y)
1639 return 1;
1640 if (x == 0 || y == 0)
1641 return 0;
1643 code = GET_CODE (x);
1645 /* If we have a register and a constant, they may sometimes be
1646 equal. */
1647 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1648 && CONSTANT_P (y))
1650 for (m = movables->head; m; m = m->next)
1651 if (m->move_insn && m->regno == REGNO (x)
1652 && rtx_equal_p (m->set_src, y))
1653 return 1;
1655 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1656 && CONSTANT_P (x))
1658 for (m = movables->head; m; m = m->next)
1659 if (m->move_insn && m->regno == REGNO (y)
1660 && rtx_equal_p (m->set_src, x))
1661 return 1;
1664 /* Otherwise, rtx's of different codes cannot be equal. */
1665 if (code != GET_CODE (y))
1666 return 0;
1668 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1669 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1671 if (GET_MODE (x) != GET_MODE (y))
1672 return 0;
1674 /* These three types of rtx's can be compared nonrecursively. */
1675 if (code == REG)
1676 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1678 if (code == LABEL_REF)
1679 return XEXP (x, 0) == XEXP (y, 0);
1680 if (code == SYMBOL_REF)
1681 return XSTR (x, 0) == XSTR (y, 0);
1683 /* Compare the elements. If any pair of corresponding elements
1684 fails to match, return 0 for the whole thing. */
1686 fmt = GET_RTX_FORMAT (code);
1687 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1689 switch (fmt[i])
1691 case 'w':
1692 if (XWINT (x, i) != XWINT (y, i))
1693 return 0;
1694 break;
1696 case 'i':
1697 if (XINT (x, i) != XINT (y, i))
1698 return 0;
1699 break;
1701 case 'E':
1702 /* Two vectors must have the same length. */
1703 if (XVECLEN (x, i) != XVECLEN (y, i))
1704 return 0;
1706 /* And the corresponding elements must match. */
1707 for (j = 0; j < XVECLEN (x, i); j++)
1708 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1709 movables, regs) == 0)
1710 return 0;
1711 break;
1713 case 'e':
1714 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1715 == 0)
1716 return 0;
1717 break;
1719 case 's':
1720 if (strcmp (XSTR (x, i), XSTR (y, i)))
1721 return 0;
1722 break;
1724 case 'u':
1725 /* These are just backpointers, so they don't matter. */
1726 break;
1728 case '0':
1729 break;
1731 /* It is believed that rtx's at this level will never
1732 contain anything but integers and other rtx's,
1733 except for within LABEL_REFs and SYMBOL_REFs. */
1734 default:
1735 abort ();
1738 return 1;
1741 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1742 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1743 references is incremented once for each added note. */
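/* (Editorial example: hoisting an insn whose SET_SRC contains
   (label_ref L) would otherwise leave L looking unused; the REG_LABEL
   note and the LABEL_NUSES bump keep later passes from deleting L.)  */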
1745 static void
1746 add_label_notes (x, insns)
1747 rtx x;
1748 rtx insns;
1750 enum rtx_code code = GET_CODE (x);
1751 int i, j;
1752 const char *fmt;
1753 rtx insn;
1755 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1757 /* This code used to ignore labels that referred to dispatch tables to
1758 avoid flow generating (slightly) worse code.
1760 We no longer ignore such label references (see LABEL_REF handling in
1761 mark_jump_label for additional information). */
1762 for (insn = insns; insn; insn = NEXT_INSN (insn))
1763 if (reg_mentioned_p (XEXP (x, 0), insn))
1765 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1766 REG_NOTES (insn));
1767 if (LABEL_P (XEXP (x, 0)))
1768 LABEL_NUSES (XEXP (x, 0))++;
1772 fmt = GET_RTX_FORMAT (code);
1773 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1775 if (fmt[i] == 'e')
1776 add_label_notes (XEXP (x, i), insns);
1777 else if (fmt[i] == 'E')
1778 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1779 add_label_notes (XVECEXP (x, i, j), insns);
1783 /* Scan MOVABLES, and move the insns that deserve to be moved.
1784 If two matching movables are combined, replace one reg with the
1785 other throughout. */
1787 static void
1788 move_movables (loop, movables, threshold, insn_count)
1789 struct loop *loop;
1790 struct loop_movables *movables;
1791 int threshold;
1792 int insn_count;
1794 struct loop_regs *regs = LOOP_REGS (loop);
1795 int nregs = regs->num;
1796 rtx new_start = 0;
1797 struct movable *m;
1798 rtx p;
1799 rtx loop_start = loop->start;
1800 rtx loop_end = loop->end;
1801 /* Map of pseudo-register replacements to handle combining
1802 when we move several insns that load the same value
1803 into different pseudo-registers. */
1804 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1805 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1807 for (m = movables->head; m; m = m->next)
1809 /* Describe this movable insn. */
1811 if (loop_dump_stream)
1813 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1814 INSN_UID (m->insn), m->regno, m->lifetime);
1815 if (m->consec > 0)
1816 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1817 if (m->cond)
1818 fprintf (loop_dump_stream, "cond ");
1819 if (m->force)
1820 fprintf (loop_dump_stream, "force ");
1821 if (m->global)
1822 fprintf (loop_dump_stream, "global ");
1823 if (m->done)
1824 fprintf (loop_dump_stream, "done ");
1825 if (m->move_insn)
1826 fprintf (loop_dump_stream, "move-insn ");
1827 if (m->match)
1828 fprintf (loop_dump_stream, "matches %d ",
1829 INSN_UID (m->match->insn));
1830 if (m->forces)
1831 fprintf (loop_dump_stream, "forces %d ",
1832 INSN_UID (m->forces->insn));
1835 /* Ignore the insn if it's already done (it matched something else).
1836 Otherwise, see if it is now safe to move. */
1838 if (!m->done
1839 && (! m->cond
1840 || (1 == loop_invariant_p (loop, m->set_src)
1841 && (m->dependencies == 0
1842 || 1 == loop_invariant_p (loop, m->dependencies))
1843 && (m->consec == 0
1844 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1845 m->consec + 1,
1846 m->insn))))
1847 && (! m->forces || m->forces->done))
1849 int regno;
1850 rtx p;
1851 int savings = m->savings;
1853 /* We have an insn that is safe to move.
1854 Compute its desirability. */
1856 p = m->insn;
1857 regno = m->regno;
1859 if (loop_dump_stream)
1860 fprintf (loop_dump_stream, "savings %d ", savings);
1862 if (regs->array[regno].moved_once && loop_dump_stream)
1863 fprintf (loop_dump_stream, "halved since already moved ");
1865 /* An insn MUST be moved if we already moved something else
1866 which is safe only if this one is moved too: that is,
1867 if already_moved[REGNO] is nonzero. */
1869 /* An insn is desirable to move if the new lifetime of the
1870 register is no more than THRESHOLD times the old lifetime.
1871 If it's not desirable, it means the loop is so big
1872 that moving won't speed things up much,
1873 and it is liable to make register usage worse. */
1875 /* It is also desirable to move if it can be moved at no
1876 extra cost because something else was already moved. */
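/* (Editorial example with made-up numbers: THRESHOLD 6, savings 2 and
   lifetime 10 give 6 * 2 * 10 = 120, which beats an insn_count of 100,
   so the insn is judged worth moving; a reg already moved once must
   beat insn_count * 2 instead.)  */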
1878 if (already_moved[regno]
1879 || flag_move_all_movables
1880 || (threshold * savings * m->lifetime) >=
1881 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1882 || (m->forces && m->forces->done
1883 && regs->array[m->forces->regno].n_times_set == 1))
1885 int count;
1886 struct movable *m1;
1887 rtx first = NULL_RTX;
1889 /* Now move the insns that set the reg. */
1891 if (m->partial && m->match)
1893 rtx newpat, i1;
1894 rtx r1, r2;
1895 /* Find the end of this chain of matching regs.
1896 Thus, we load each reg in the chain from that one reg.
1897 And that reg is loaded with 0 directly,
1898 since it has ->match == 0. */
1899 for (m1 = m; m1->match; m1 = m1->match);
1900 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1901 SET_DEST (PATTERN (m1->insn)));
1902 i1 = loop_insn_hoist (loop, newpat);
1904 /* Mark the moved, invariant reg as being allowed to
1905 share a hard reg with the other matching invariant. */
1906 REG_NOTES (i1) = REG_NOTES (m->insn);
1907 r1 = SET_DEST (PATTERN (m->insn));
1908 r2 = SET_DEST (PATTERN (m1->insn));
1909 regs_may_share
1910 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1911 gen_rtx_EXPR_LIST (VOIDmode, r2,
1912 regs_may_share));
1913 delete_insn (m->insn);
1915 if (new_start == 0)
1916 new_start = i1;
1918 if (loop_dump_stream)
1919 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1921 /* If we are to re-generate the item being moved with a
1922 new move insn, first delete what we have and then emit
1923 the move insn before the loop. */
1924 else if (m->move_insn)
1926 rtx i1, temp, seq;
1928 for (count = m->consec; count >= 0; count--)
1930 /* If this is the first insn of a library call sequence,
1931 something is very wrong. */
1932 if (GET_CODE (p) != NOTE
1933 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1934 abort ();
1936 /* If this is the last insn of a libcall sequence, then
1937 delete every insn in the sequence except the last.
1938 The last insn is handled in the normal manner. */
1939 if (GET_CODE (p) != NOTE
1940 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1942 temp = XEXP (temp, 0);
1943 while (temp != p)
1944 temp = delete_insn (temp);
1947 temp = p;
1948 p = delete_insn (p);
1950 /* simplify_giv_expr expects that it can walk the insns
1951 at m->insn forwards and see this old sequence we are
1952 tossing here. delete_insn does preserve the next
1953 pointers, but when we skip over a NOTE we must fix
1954 it up. Otherwise that code walks into the non-deleted
1955 insn stream. */
1956 while (p && GET_CODE (p) == NOTE)
1957 p = NEXT_INSN (temp) = NEXT_INSN (p);
1960 start_sequence ();
1961 emit_move_insn (m->set_dest, m->set_src);
1962 seq = get_insns ();
1963 end_sequence ();
1965 add_label_notes (m->set_src, seq);
1967 i1 = loop_insn_hoist (loop, seq);
1968 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1969 set_unique_reg_note (i1,
1970 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1971 m->set_src);
1973 if (loop_dump_stream)
1974 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1976 /* The more regs we move, the less we like moving them. */
1977 threshold -= 3;
1979 else
1981 for (count = m->consec; count >= 0; count--)
1983 rtx i1, temp;
1985 /* If first insn of libcall sequence, skip to end. */
1986 /* Do this at start of loop, since p is guaranteed to
1987 be an insn here. */
1988 if (GET_CODE (p) != NOTE
1989 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1990 p = XEXP (temp, 0);
1992 /* If last insn of libcall sequence, move all
1993 insns except the last before the loop. The last
1994 insn is handled in the normal manner. */
1995 if (GET_CODE (p) != NOTE
1996 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1998 rtx fn_address = 0;
1999 rtx fn_reg = 0;
2000 rtx fn_address_insn = 0;
2002 first = 0;
2003 for (temp = XEXP (temp, 0); temp != p;
2004 temp = NEXT_INSN (temp))
2006 rtx body;
2007 rtx n;
2008 rtx next;
2010 if (GET_CODE (temp) == NOTE)
2011 continue;
2013 body = PATTERN (temp);
2015 /* Find the next insn after TEMP,
2016 not counting USE or NOTE insns. */
2017 for (next = NEXT_INSN (temp); next != p;
2018 next = NEXT_INSN (next))
2019 if (! (GET_CODE (next) == INSN
2020 && GET_CODE (PATTERN (next)) == USE)
2021 && GET_CODE (next) != NOTE)
2022 break;
2024 /* If that is the call, this may be the insn
2025 that loads the function address.
2027 Extract the function address from the insn
2028 that loads it into a register.
2029 If this insn was cse'd, we get incorrect code.
2031 So emit a new move insn that copies the
2032 function address into the register that the
2033 call insn will use. flow.c will delete any
2034 redundant stores that we have created. */
2035 if (GET_CODE (next) == CALL_INSN
2036 && GET_CODE (body) == SET
2037 && GET_CODE (SET_DEST (body)) == REG
2038 && (n = find_reg_note (temp, REG_EQUAL,
2039 NULL_RTX)))
2041 fn_reg = SET_SRC (body);
2042 if (GET_CODE (fn_reg) != REG)
2043 fn_reg = SET_DEST (body);
2044 fn_address = XEXP (n, 0);
2045 fn_address_insn = temp;
2047 /* We have the call insn.
2048 If it uses the register we suspect it might,
2049 load it with the correct address directly. */
2050 if (GET_CODE (temp) == CALL_INSN
2051 && fn_address != 0
2052 && reg_referenced_p (fn_reg, body))
2053 loop_insn_emit_after (loop, 0, fn_address_insn,
2054 gen_move_insn
2055 (fn_reg, fn_address));
2057 if (GET_CODE (temp) == CALL_INSN)
2059 i1 = loop_call_insn_hoist (loop, body);
2060 /* Because the USAGE information potentially
2061 contains objects other than hard registers
2062 we need to copy it. */
2063 if (CALL_INSN_FUNCTION_USAGE (temp))
2064 CALL_INSN_FUNCTION_USAGE (i1)
2065 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2067 else
2068 i1 = loop_insn_hoist (loop, body);
2069 if (first == 0)
2070 first = i1;
2071 if (temp == fn_address_insn)
2072 fn_address_insn = i1;
2073 REG_NOTES (i1) = REG_NOTES (temp);
2074 REG_NOTES (temp) = NULL;
2075 delete_insn (temp);
2077 if (new_start == 0)
2078 new_start = first;
2080 if (m->savemode != VOIDmode)
2082 /* P sets REG to zero; but we should clear only
2083 the bits that are not covered by the mode
2084 m->savemode. */
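/* (Editorial example: for savemode == QImode the mask below is
   (1 << 8) - 1 == 0xff, so the hoisted AND clears every bit above
   the low byte once, before the loop, instead of zeroing the whole
   reg inside it.)  */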
2085 rtx reg = m->set_dest;
2086 rtx sequence;
2087 rtx tem;
2089 start_sequence ();
2090 tem = expand_simple_binop
2091 (GET_MODE (reg), AND, reg,
2092 GEN_INT ((((HOST_WIDE_INT) 1
2093 << GET_MODE_BITSIZE (m->savemode)))
2094 - 1),
2095 reg, 1, OPTAB_LIB_WIDEN);
2096 if (tem == 0)
2097 abort ();
2098 if (tem != reg)
2099 emit_move_insn (reg, tem);
2100 sequence = get_insns ();
2101 end_sequence ();
2102 i1 = loop_insn_hoist (loop, sequence);
2104 else if (GET_CODE (p) == CALL_INSN)
2106 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2107 /* Because the USAGE information potentially
2108 contains objects other than hard registers
2109 we need to copy it. */
2110 if (CALL_INSN_FUNCTION_USAGE (p))
2111 CALL_INSN_FUNCTION_USAGE (i1)
2112 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2114 else if (count == m->consec && m->move_insn_first)
2116 rtx seq;
2117 /* The SET_SRC might not be invariant, so we must
2118 use the REG_EQUAL note. */
2119 start_sequence ();
2120 emit_move_insn (m->set_dest, m->set_src);
2121 seq = get_insns ();
2122 end_sequence ();
2124 add_label_notes (m->set_src, seq);
2126 i1 = loop_insn_hoist (loop, seq);
2127 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2128 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2129 : REG_EQUAL, m->set_src);
2131 else
2132 i1 = loop_insn_hoist (loop, PATTERN (p));
2134 if (REG_NOTES (i1) == 0)
2136 REG_NOTES (i1) = REG_NOTES (p);
2137 REG_NOTES (p) = NULL;
2139 /* If there is a REG_EQUAL note present whose value
2140 is not loop invariant, then delete it, since it
2141 may cause problems with later optimization passes.
2142 It is possible for cse to create notes
2143 like this as a result of record_jump_cond. */
2145 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2146 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2147 remove_note (i1, temp);
2150 if (new_start == 0)
2151 new_start = i1;
2153 if (loop_dump_stream)
2154 fprintf (loop_dump_stream, " moved to %d",
2155 INSN_UID (i1));
2157 /* If library call, now fix the REG_NOTES that contain
2158 insn pointers, namely REG_LIBCALL on FIRST
2159 and REG_RETVAL on I1. */
2160 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2162 XEXP (temp, 0) = first;
2163 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2164 XEXP (temp, 0) = i1;
2167 temp = p;
2168 delete_insn (p);
2169 p = NEXT_INSN (p);
2171 /* simplify_giv_expr expects that it can walk the insns
2172 at m->insn forwards and see this old sequence we are
2173 tossing here. delete_insn does preserve the next
2174 pointers, but when we skip over a NOTE we must fix
2175 it up. Otherwise that code walks into the non-deleted
2176 insn stream. */
2177 while (p && GET_CODE (p) == NOTE)
2178 p = NEXT_INSN (temp) = NEXT_INSN (p);
2181 /* The more regs we move, the less we like moving them. */
2182 threshold -= 3;
2185 /* Any other movable that loads the same register
2186 MUST be moved. */
2187 already_moved[regno] = 1;
2189 /* This reg has been moved out of one loop. */
2190 regs->array[regno].moved_once = 1;
2192 /* The reg set here is now invariant. */
2193 if (! m->partial)
2195 int i;
2196 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2197 regs->array[regno+i].set_in_loop = 0;
2200 m->done = 1;
2202 /* Change the length-of-life info for the register
2203 to say it lives at least the full length of this loop.
2204 This will help guide optimizations in outer loops. */
2206 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2207 /* This is the old insn before all the moved insns.
2208 We can't use the moved insn because it is out of range
2209 in uid_luid. Only the old insns have luids. */
2210 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2211 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2212 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2214 /* Combine with this moved insn any other matching movables. */
2216 if (! m->partial)
2217 for (m1 = movables->head; m1; m1 = m1->next)
2218 if (m1->match == m)
2220 rtx temp;
2222 /* Schedule the reg loaded by M1
2223 for replacement so that it shares the reg of M.
2224 If the modes differ (only possible in restricted
2225 circumstances), make a SUBREG.
2227 Note this assumes that the target dependent files
2228 treat REG and SUBREG equally, including within
2229 GO_IF_LEGITIMATE_ADDRESS and in all the
2230 predicates since we never verify that replacing the
2231 original register with a SUBREG results in a
2232 recognizable insn. */
2233 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2234 reg_map[m1->regno] = m->set_dest;
2235 else
2236 reg_map[m1->regno]
2237 = gen_lowpart_common (GET_MODE (m1->set_dest),
2238 m->set_dest);
2240 /* Get rid of the matching insn
2241 and prevent further processing of it. */
2242 m1->done = 1;
2244 /* If library call, delete all insns. */
2245 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2246 NULL_RTX)))
2247 delete_insn_chain (XEXP (temp, 0), m1->insn);
2248 else
2249 delete_insn (m1->insn);
2251 /* Any other movable that loads the same register
2252 MUST be moved. */
2253 already_moved[m1->regno] = 1;
2255 /* The reg merged here is now invariant,
2256 if the reg it matches is invariant. */
2257 if (! m->partial)
2259 int i;
2260 for (i = 0;
2261 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2262 i++)
2263 regs->array[m1->regno+i].set_in_loop = 0;
2267 else if (loop_dump_stream)
2268 fprintf (loop_dump_stream, "not desirable");
2270 else if (loop_dump_stream && !m->match)
2271 fprintf (loop_dump_stream, "not safe");
2273 if (loop_dump_stream)
2274 fprintf (loop_dump_stream, "\n");
2277 if (new_start == 0)
2278 new_start = loop_start;
2280 /* Go through all the instructions in the loop, making
2281 all the register substitutions scheduled in REG_MAP. */
2282 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2283 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2284 || GET_CODE (p) == CALL_INSN)
2286 replace_regs (PATTERN (p), reg_map, nregs, 0);
2287 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2288 INSN_CODE (p) = -1;
2291 /* Clean up. */
2292 free (reg_map);
2293 free (already_moved);
2297 static void
2298 loop_movables_add (movables, m)
2299 struct loop_movables *movables;
2300 struct movable *m;
2302 if (movables->head == 0)
2303 movables->head = m;
2304 else
2305 movables->last->next = m;
2306 movables->last = m;
2310 static void
2311 loop_movables_free (movables)
2312 struct loop_movables *movables;
2314 struct movable *m;
2315 struct movable *m_next;
2317 for (m = movables->head; m; m = m_next)
2319 m_next = m->next;
2320 free (m);
2324 #if 0
2325 /* Scan X and replace the address of any MEM in it with ADDR.
2326 REG is the address that MEM should have before the replacement. */
2328 static void
2329 replace_call_address (x, reg, addr)
2330 rtx x, reg, addr;
2332 enum rtx_code code;
2333 int i;
2334 const char *fmt;
2336 if (x == 0)
2337 return;
2338 code = GET_CODE (x);
2339 switch (code)
2341 case PC:
2342 case CC0:
2343 case CONST_INT:
2344 case CONST_DOUBLE:
2345 case CONST:
2346 case SYMBOL_REF:
2347 case LABEL_REF:
2348 case REG:
2349 return;
2351 case SET:
2352 /* Short cut for very common case. */
2353 replace_call_address (XEXP (x, 1), reg, addr);
2354 return;
2356 case CALL:
2357 /* Short cut for very common case. */
2358 replace_call_address (XEXP (x, 0), reg, addr);
2359 return;
2361 case MEM:
2362 /* If this MEM uses a reg other than the one we expected,
2363 something is wrong. */
2364 if (XEXP (x, 0) != reg)
2365 abort ();
2366 XEXP (x, 0) = addr;
2367 return;
2369 default:
2370 break;
2373 fmt = GET_RTX_FORMAT (code);
2374 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2376 if (fmt[i] == 'e')
2377 replace_call_address (XEXP (x, i), reg, addr);
2378 else if (fmt[i] == 'E')
2380 int j;
2381 for (j = 0; j < XVECLEN (x, i); j++)
2382 replace_call_address (XVECEXP (x, i, j), reg, addr);
2386 #endif
2388 /* Return the number of memory refs to addresses that vary
2389 in the rtx X. */
2391 static int
2392 count_nonfixed_reads (loop, x)
2393 const struct loop *loop;
2394 rtx x;
2396 enum rtx_code code;
2397 int i;
2398 const char *fmt;
2399 int value;
2401 if (x == 0)
2402 return 0;
2404 code = GET_CODE (x);
2405 switch (code)
2407 case PC:
2408 case CC0:
2409 case CONST_INT:
2410 case CONST_DOUBLE:
2411 case CONST:
2412 case SYMBOL_REF:
2413 case LABEL_REF:
2414 case REG:
2415 return 0;
2417 case MEM:
2418 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2419 + count_nonfixed_reads (loop, XEXP (x, 0)));
2421 default:
2422 break;
2425 value = 0;
2426 fmt = GET_RTX_FORMAT (code);
2427 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2429 if (fmt[i] == 'e')
2430 value += count_nonfixed_reads (loop, XEXP (x, i));
2431 if (fmt[i] == 'E')
2433 int j;
2434 for (j = 0; j < XVECLEN (x, i); j++)
2435 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2438 return value;
2441 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2442 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2443 `unknown_address_altered', `unknown_constant_address_altered', and
2444 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2445 list `store_mems' in LOOP. */
2447 static void
2448 prescan_loop (loop)
2449 struct loop *loop;
2451 int level = 1;
2452 rtx insn;
2453 struct loop_info *loop_info = LOOP_INFO (loop);
2454 rtx start = loop->start;
2455 rtx end = loop->end;
2456 /* The label after END. Jumping here is just like falling off the
2457 end of the loop. We use next_nonnote_insn instead of next_label
2458 as a hedge against the (pathological) case where some actual insn
2459 might end up between the two. */
2460 rtx exit_target = next_nonnote_insn (end);
2462 loop_info->has_indirect_jump = indirect_jump_in_function;
2463 loop_info->pre_header_has_call = 0;
2464 loop_info->has_call = 0;
2465 loop_info->has_nonconst_call = 0;
2466 loop_info->has_prefetch = 0;
2467 loop_info->has_volatile = 0;
2468 loop_info->has_tablejump = 0;
2469 loop_info->has_multiple_exit_targets = 0;
2470 loop->level = 1;
2472 loop_info->unknown_address_altered = 0;
2473 loop_info->unknown_constant_address_altered = 0;
2474 loop_info->store_mems = NULL_RTX;
2475 loop_info->first_loop_store_insn = NULL_RTX;
2476 loop_info->mems_idx = 0;
2477 loop_info->num_mem_sets = 0;
2478 /* If loop opts run twice, this was set on the first pass for the second. */
2479 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2481 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2482 insn = PREV_INSN (insn))
2484 if (GET_CODE (insn) == CALL_INSN)
2486 loop_info->pre_header_has_call = 1;
2487 break;
2491 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2492 insn = NEXT_INSN (insn))
2494 switch (GET_CODE (insn))
2496 case NOTE:
2497 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2499 ++level;
2500 /* Count number of loops contained in this one. */
2501 loop->level++;
2503 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2504 --level;
2505 break;
2507 case CALL_INSN:
2508 if (! CONST_OR_PURE_CALL_P (insn))
2510 loop_info->unknown_address_altered = 1;
2511 loop_info->has_nonconst_call = 1;
2513 else if (pure_call_p (insn))
2514 loop_info->has_nonconst_call = 1;
2515 loop_info->has_call = 1;
2516 if (can_throw_internal (insn))
2517 loop_info->has_multiple_exit_targets = 1;
2518 break;
2520 case JUMP_INSN:
2521 if (! loop_info->has_multiple_exit_targets)
2523 rtx set = pc_set (insn);
2525 if (set)
2527 rtx src = SET_SRC (set);
2528 rtx label1, label2;
2530 if (GET_CODE (src) == IF_THEN_ELSE)
2532 label1 = XEXP (src, 1);
2533 label2 = XEXP (src, 2);
2535 else
2537 label1 = src;
2538 label2 = NULL_RTX;
2543 if (label1 && label1 != pc_rtx)
2545 if (GET_CODE (label1) != LABEL_REF)
2547 /* Something tricky. */
2548 loop_info->has_multiple_exit_targets = 1;
2549 break;
2551 else if (XEXP (label1, 0) != exit_target
2552 && LABEL_OUTSIDE_LOOP_P (label1))
2554 /* A jump outside the current loop. */
2555 loop_info->has_multiple_exit_targets = 1;
2556 break;
2560 label1 = label2;
2561 label2 = NULL_RTX;
2563 while (label1);
2565 else
2567 /* A return, or something tricky. */
2568 loop_info->has_multiple_exit_targets = 1;
2571 /* FALLTHRU */
2573 case INSN:
2574 if (volatile_refs_p (PATTERN (insn)))
2575 loop_info->has_volatile = 1;
2577 if (GET_CODE (insn) == JUMP_INSN
2578 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2579 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2580 loop_info->has_tablejump = 1;
2582 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2583 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2584 loop_info->first_loop_store_insn = insn;
2586 if (flag_non_call_exceptions && can_throw_internal (insn))
2587 loop_info->has_multiple_exit_targets = 1;
2588 break;
2590 default:
2591 break;
2595 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2596 if (/* An exception thrown by a called function might land us
2597 anywhere. */
2598 ! loop_info->has_nonconst_call
2599 /* We don't want loads for MEMs moved to a location before the
2600 one at which their stack memory becomes allocated. (Note
2601 that this is not a problem for malloc, etc., since those
2602 require actual function calls.) */
2603 && ! current_function_calls_alloca
2604 /* There are ways to leave the loop other than falling off the
2605 end. */
2606 && ! loop_info->has_multiple_exit_targets)
2607 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2608 insn = NEXT_INSN (insn))
2609 for_each_rtx (&insn, insert_loop_mem, loop_info);
2611 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2612 that loop_invariant_p and load_mems can use true_dependence
2613 to determine what is really clobbered. */
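/* (Editorial note: a (mem:BLK (const0_rtx)) conflicts with any other
   MEM under true_dependence, so pushing one onto store_mems makes the
   list answer "yes" to every "might this have been stored into?"
   query.)  */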
2614 if (loop_info->unknown_address_altered)
2616 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2618 loop_info->store_mems
2619 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2621 if (loop_info->unknown_constant_address_altered)
2623 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2625 RTX_UNCHANGING_P (mem) = 1;
2626 loop_info->store_mems
2627 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2631 /* Invalidate all loops containing LABEL. */
2633 static void
2634 invalidate_loops_containing_label (label)
2635 rtx label;
2637 struct loop *loop;
2638 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2639 loop->invalid = 1;
2642 /* Scan the function looking for loops. Record the start and end of each loop.
2643 Also mark as invalid loops any loops that contain a setjmp or are branched
2644 to from outside the loop. */
2646 static void
2647 find_and_verify_loops (f, loops)
2648 rtx f;
2649 struct loops *loops;
2651 rtx insn;
2652 rtx label;
2653 int num_loops;
2654 struct loop *current_loop;
2655 struct loop *next_loop;
2656 struct loop *loop;
2658 num_loops = loops->num;
2660 compute_luids (f, NULL_RTX, 0);
2662 /* If there are jumps to undefined labels,
2663 treat them as jumps out of any/all loops.
2664 This also avoids writing past the end of tables when there are no loops.
2665 uid_loop[0] = NULL;
2667 /* Find boundaries of loops, mark which loops are contained within
2668 loops, and invalidate loops that have setjmp. */
2670 num_loops = 0;
2671 current_loop = NULL;
2672 for (insn = f; insn; insn = NEXT_INSN (insn))
2674 if (GET_CODE (insn) == NOTE)
2675 switch (NOTE_LINE_NUMBER (insn))
2677 case NOTE_INSN_LOOP_BEG:
2678 next_loop = loops->array + num_loops;
2679 next_loop->num = num_loops;
2680 num_loops++;
2681 next_loop->start = insn;
2682 next_loop->outer = current_loop;
2683 current_loop = next_loop;
2684 break;
2686 case NOTE_INSN_LOOP_CONT:
2687 current_loop->cont = insn;
2688 break;
2690 case NOTE_INSN_LOOP_VTOP:
2691 current_loop->vtop = insn;
2692 break;
2694 case NOTE_INSN_LOOP_END:
2695 if (! current_loop)
2696 abort ();
2698 current_loop->end = insn;
2699 current_loop = current_loop->outer;
2700 break;
2702 default:
2703 break;
2706 if (GET_CODE (insn) == CALL_INSN
2707 && find_reg_note (insn, REG_SETJMP, NULL))
2709 /* In this case, we must invalidate our current loop and any
2710 enclosing loop. */
2711 for (loop = current_loop; loop; loop = loop->outer)
2713 loop->invalid = 1;
2714 if (loop_dump_stream)
2715 fprintf (loop_dump_stream,
2716 "\nLoop at %d ignored due to setjmp.\n",
2717 INSN_UID (loop->start));
2721 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2722 enclosing loop, but this doesn't matter. */
2723 uid_loop[INSN_UID (insn)] = current_loop;
2726 /* Any loop containing a label used in an initializer must be invalidated,
2727 because it can be jumped into from anywhere. */
2728 for (label = forced_labels; label; label = XEXP (label, 1))
2729 invalidate_loops_containing_label (XEXP (label, 0));
2731 /* Any loop containing a label used for an exception handler must be
2732 invalidated, because it can be jumped into from anywhere. */
2733 for_each_eh_label (invalidate_loops_containing_label);
2735 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2736 loop that it is not contained within, that loop is marked invalid.
2737 If any INSN or CALL_INSN uses a label's address, then the loop containing
2738 that label is marked invalid, because it could be jumped into from
2739 anywhere.
2741 Also look for blocks of code ending in an unconditional branch that
2742 exits the loop. If such a block is surrounded by a conditional
2743 branch around the block, move the block elsewhere (see below) and
2744 invert the jump to point to the code block. This may eliminate a
2745 label in our loop and will simplify processing by both us and a
2746 possible second cse pass. */
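/* (Editorial sketch with made-up labels: a sequence

       if (cond) goto L;  <block ending in a jump out of the loop>  L:

   becomes `if (!cond) goto NEW' inside the loop, with <block> moved
   next to a BARRIER outside it and NEW labelling the block's new
   position.)  */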
2748 for (insn = f; insn; insn = NEXT_INSN (insn))
2749 if (INSN_P (insn))
2751 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2753 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2755 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2756 if (note)
2757 invalidate_loops_containing_label (XEXP (note, 0));
2760 if (GET_CODE (insn) != JUMP_INSN)
2761 continue;
2763 mark_loop_jump (PATTERN (insn), this_loop);
2765 /* See if this is an unconditional branch outside the loop. */
2766 if (this_loop
2767 && (GET_CODE (PATTERN (insn)) == RETURN
2768 || (any_uncondjump_p (insn)
2769 && onlyjump_p (insn)
2770 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2771 != this_loop)))
2772 && get_max_uid () < max_uid_for_loop)
2774 rtx p;
2775 rtx our_next = next_real_insn (insn);
2776 rtx last_insn_to_move = NEXT_INSN (insn);
2777 struct loop *dest_loop;
2778 struct loop *outer_loop = NULL;
2780 /* Go backwards until we reach the start of the loop, a label,
2781 or a JUMP_INSN. */
2782 for (p = PREV_INSN (insn);
2783 GET_CODE (p) != CODE_LABEL
2784 && ! (GET_CODE (p) == NOTE
2785 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2786 && GET_CODE (p) != JUMP_INSN;
2787 p = PREV_INSN (p))
2790 /* Check for the case where we have a jump to an inner nested
2791 loop, and do not perform the optimization in that case. */
2793 if (JUMP_LABEL (insn))
2795 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2796 if (dest_loop)
2798 for (outer_loop = dest_loop; outer_loop;
2799 outer_loop = outer_loop->outer)
2800 if (outer_loop == this_loop)
2801 break;
2805 /* Make sure that the target of P is within the current loop. */
2807 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2808 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2809 outer_loop = this_loop;
2811 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2812 we have a block of code to try to move.
2814 We look backward and then forward from the target of INSN
2815 to find a BARRIER at the same loop depth as the target.
2816 If we find such a BARRIER, we make a new label for the start
2817 of the block, invert the jump in P and point it to that label,
2818 and move the block of code to the spot we found. */
2820 if (! outer_loop
2821 && GET_CODE (p) == JUMP_INSN
2822 && JUMP_LABEL (p) != 0
2823 /* Just ignore jumps to labels that were never emitted.
2824 These always indicate compilation errors. */
2825 && INSN_UID (JUMP_LABEL (p)) != 0
2826 && any_condjump_p (p) && onlyjump_p (p)
2827 && next_real_insn (JUMP_LABEL (p)) == our_next
2828 /* If it's not safe to move the sequence, then we
2829 mustn't try. */
2830 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2831 &last_insn_to_move))
2833 rtx target
2834 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2835 struct loop *target_loop = uid_loop[INSN_UID (target)];
2836 rtx loc, loc2;
2837 rtx tmp;
2839 /* Search for possible garbage past the conditional jumps
2840 and look for the last barrier. */
2841 for (tmp = last_insn_to_move;
2842 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2843 if (GET_CODE (tmp) == BARRIER)
2844 last_insn_to_move = tmp;
2846 for (loc = target; loc; loc = PREV_INSN (loc))
2847 if (GET_CODE (loc) == BARRIER
2848 /* Don't move things inside a tablejump. */
2849 && ((loc2 = next_nonnote_insn (loc)) == 0
2850 || GET_CODE (loc2) != CODE_LABEL
2851 || (loc2 = next_nonnote_insn (loc2)) == 0
2852 || GET_CODE (loc2) != JUMP_INSN
2853 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2854 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2855 && uid_loop[INSN_UID (loc)] == target_loop)
2856 break;
2858 if (loc == 0)
2859 for (loc = target; loc; loc = NEXT_INSN (loc))
2860 if (GET_CODE (loc) == BARRIER
2861 /* Don't move things inside a tablejump. */
2862 && ((loc2 = next_nonnote_insn (loc)) == 0
2863 || GET_CODE (loc2) != CODE_LABEL
2864 || (loc2 = next_nonnote_insn (loc2)) == 0
2865 || GET_CODE (loc2) != JUMP_INSN
2866 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2867 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2868 && uid_loop[INSN_UID (loc)] == target_loop)
2869 break;
2871 if (loc)
2873 rtx cond_label = JUMP_LABEL (p);
2874 rtx new_label = get_label_after (p);
2876 /* Ensure our label doesn't go away. */
2877 LABEL_NUSES (cond_label)++;
2879 /* Verify that uid_loop is large enough and that
2880 we can invert P. */
2881 if (invert_jump (p, new_label, 1))
2883 rtx q, r;
2885 /* If no suitable BARRIER was found, create a suitable
2886 one before TARGET. Since TARGET is a fall through
2887 path, we'll need to insert a jump around our block
2888 and add a BARRIER before TARGET.
2890 This creates an extra unconditional jump outside
2891 the loop. However, the benefits of removing rarely
2892 executed instructions from inside the loop usually
2893 outweigh the cost of the extra unconditional jump
2894 outside the loop. */
2895 if (loc == 0)
2897 rtx temp;
2899 temp = gen_jump (JUMP_LABEL (insn));
2900 temp = emit_jump_insn_before (temp, target);
2901 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2902 LABEL_NUSES (JUMP_LABEL (insn))++;
2903 loc = emit_barrier_before (target);
2906 /* Include the BARRIER after INSN and copy the
2907 block after LOC. */
2908 if (squeeze_notes (&new_label, &last_insn_to_move))
2909 abort ();
2910 reorder_insns (new_label, last_insn_to_move, loc);
2912 /* All those insns are now in TARGET_LOOP. */
2913 for (q = new_label;
2914 q != NEXT_INSN (last_insn_to_move);
2915 q = NEXT_INSN (q))
2916 uid_loop[INSN_UID (q)] = target_loop;
2918 /* The label jumped to by INSN is no longer a loop
2919 exit. Unless INSN does not have a label (e.g.,
2920 it is a RETURN insn), search loop->exit_labels
2921 to find its label_ref, and remove it. Also turn
2922 off LABEL_OUTSIDE_LOOP_P bit. */
2923 if (JUMP_LABEL (insn))
2925 for (q = 0, r = this_loop->exit_labels;
2926 r;
2927 q = r, r = LABEL_NEXTREF (r))
2928 if (XEXP (r, 0) == JUMP_LABEL (insn))
2930 LABEL_OUTSIDE_LOOP_P (r) = 0;
2931 if (q)
2932 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2933 else
2934 this_loop->exit_labels = LABEL_NEXTREF (r);
2935 break;
2938 for (loop = this_loop; loop && loop != target_loop;
2939 loop = loop->outer)
2940 loop->exit_count--;
2942 /* If we didn't find it, then something is
2943 wrong. */
2944 if (! r)
2945 abort ();
2948 /* P is now a jump outside the loop, so it must be put
2949 in loop->exit_labels, and marked as such.
2950 The easiest way to do this is to just call
2951 mark_loop_jump again for P. */
2952 mark_loop_jump (PATTERN (p), this_loop);
2954 /* If INSN now jumps to the insn after it,
2955 delete INSN. */
2956 if (JUMP_LABEL (insn) != 0
2957 && (next_real_insn (JUMP_LABEL (insn))
2958 == next_real_insn (insn)))
2959 delete_related_insns (insn);
2962 /* Continue the loop after where the conditional
2963 branch used to jump, since the only branch insn
2964 in the block (if it still remains) is an inter-loop
2965 branch and hence needs no processing. */
2966 insn = NEXT_INSN (cond_label);
2968 if (--LABEL_NUSES (cond_label) == 0)
2969 delete_related_insns (cond_label);
2971 /* This loop will be continued with NEXT_INSN (insn). */
2972 insn = PREV_INSN (insn);
2979 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2980 loops it is contained in, mark the target loop invalid.
2982 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2984 static void
2985 mark_loop_jump (x, loop)
2986 rtx x;
2987 struct loop *loop;
2989 struct loop *dest_loop;
2990 struct loop *outer_loop;
2991 int i;
2993 switch (GET_CODE (x))
2995 case PC:
2996 case USE:
2997 case CLOBBER:
2998 case REG:
2999 case MEM:
3000 case CONST_INT:
3001 case CONST_DOUBLE:
3002 case RETURN:
3003 return;
3005 case CONST:
3006 /* There could be a label reference in here. */
3007 mark_loop_jump (XEXP (x, 0), loop);
3008 return;
3010 case PLUS:
3011 case MINUS:
3012 case MULT:
3013 mark_loop_jump (XEXP (x, 0), loop);
3014 mark_loop_jump (XEXP (x, 1), loop);
3015 return;
3017 case LO_SUM:
3018 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3019 mark_loop_jump (XEXP (x, 1), loop);
3020 return;
3022 case SIGN_EXTEND:
3023 case ZERO_EXTEND:
3024 mark_loop_jump (XEXP (x, 0), loop);
3025 return;
3027 case LABEL_REF:
3028 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3030 /* Link together all labels that branch outside the loop. This
3031 is used by final_[bg]iv_value and the loop unrolling code. Also
3032 mark this LABEL_REF so we know that this branch should predict
3033 false. */
3035 /* A check to make sure the label is not in an inner nested loop,
3036 since this does not count as a loop exit. */
3037 if (dest_loop)
3039 for (outer_loop = dest_loop; outer_loop;
3040 outer_loop = outer_loop->outer)
3041 if (outer_loop == loop)
3042 break;
3044 else
3045 outer_loop = NULL;
3047 if (loop && ! outer_loop)
3049 LABEL_OUTSIDE_LOOP_P (x) = 1;
3050 LABEL_NEXTREF (x) = loop->exit_labels;
3051 loop->exit_labels = x;
3053 for (outer_loop = loop;
3054 outer_loop && outer_loop != dest_loop;
3055 outer_loop = outer_loop->outer)
3056 outer_loop->exit_count++;
3059 /* If this is inside a loop, but not in the current loop or one enclosed
3060 by it, it invalidates at least one loop. */
3062 if (! dest_loop)
3063 return;
3065 /* We must invalidate every nested loop containing the target of this
3066 label, except those that also contain the jump insn. */
3068 for (; dest_loop; dest_loop = dest_loop->outer)
3070 /* Stop when we reach a loop that also contains the jump insn. */
3071 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3072 if (dest_loop == outer_loop)
3073 return;
3075 /* If we get here, we know we need to invalidate a loop. */
3076 if (loop_dump_stream && ! dest_loop->invalid)
3077 fprintf (loop_dump_stream,
3078 "\nLoop at %d ignored due to multiple entry points.\n",
3079 INSN_UID (dest_loop->start));
3081 dest_loop->invalid = 1;
3083 return;
3085 case SET:
3086 /* If this is not setting pc, ignore. */
3087 if (SET_DEST (x) == pc_rtx)
3088 mark_loop_jump (SET_SRC (x), loop);
3089 return;
3091 case IF_THEN_ELSE:
3092 mark_loop_jump (XEXP (x, 1), loop);
3093 mark_loop_jump (XEXP (x, 2), loop);
3094 return;
3096 case PARALLEL:
3097 case ADDR_VEC:
3098 for (i = 0; i < XVECLEN (x, 0); i++)
3099 mark_loop_jump (XVECEXP (x, 0, i), loop);
3100 return;
3102 case ADDR_DIFF_VEC:
3103 for (i = 0; i < XVECLEN (x, 1); i++)
3104 mark_loop_jump (XVECEXP (x, 1, i), loop);
3105 return;
3107 default:
3108 /* Strictly speaking this is not a jump into the loop, only a possible
3109 jump out of the loop. However, we have no way to link the destination
3110 of this jump onto the list of exit labels. To be safe we mark this
3111 loop and any containing loops as invalid. */
3112 if (loop)
3114 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3116 if (loop_dump_stream && ! outer_loop->invalid)
3117 fprintf (loop_dump_stream,
3118 "\nLoop at %d ignored due to unknown exit jump.\n",
3119 INSN_UID (outer_loop->start));
3120 outer_loop->invalid = 1;
3123 return;
3127 /* Return nonzero if there is a label in the range from
3128 insn INSN to and including the insn whose luid is END.
3129 INSN must have an assigned luid (i.e., it must not have
3130 been previously created by loop.c). */
3132 static int
3133 labels_in_range_p (insn, end)
3134 rtx insn;
3135 int end;
3137 while (insn && INSN_LUID (insn) <= end)
3139 if (GET_CODE (insn) == CODE_LABEL)
3140 return 1;
3141 insn = NEXT_INSN (insn);
3144 return 0;
3147 /* Record that a memory reference X is being set. */
3149 static void
3150 note_addr_stored (x, y, data)
3151 rtx x;
3152 rtx y ATTRIBUTE_UNUSED;
3153 void *data;
3155 struct loop_info *loop_info = data;
3157 if (x == 0 || GET_CODE (x) != MEM)
3158 return;
3160 /* Count number of memory writes.
3161 This affects heuristics in strength_reduce. */
3162 loop_info->num_mem_sets++;
3164 /* BLKmode MEM means all memory is clobbered. */
3165 if (GET_MODE (x) == BLKmode)
3167 if (RTX_UNCHANGING_P (x))
3168 loop_info->unknown_constant_address_altered = 1;
3169 else
3170 loop_info->unknown_address_altered = 1;
3172 return;
3175 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3176 loop_info->store_mems);
3179 /* X is a value modified by an INSN that references a biv inside a loop
3180 exit test (i.e., X is somehow related to the value of the biv). If X
3181 is a pseudo that is used more than once, then the biv is (effectively)
3182 used more than once. DATA is a pointer to a loop_regs structure. */
3184 static void
3185 note_set_pseudo_multiple_uses (x, y, data)
3186 rtx x;
3187 rtx y ATTRIBUTE_UNUSED;
3188 void *data;
3190 struct loop_regs *regs = (struct loop_regs *) data;
3192 if (x == 0)
3193 return;
3195 while (GET_CODE (x) == STRICT_LOW_PART
3196 || GET_CODE (x) == SIGN_EXTRACT
3197 || GET_CODE (x) == ZERO_EXTRACT
3198 || GET_CODE (x) == SUBREG)
3199 x = XEXP (x, 0);
3201 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3202 return;
3204 /* If we do not have usage information, or if we know the register
3205 is used more than once, note that fact for check_dbra_loop. */
3206 if (REGNO (x) >= max_reg_before_loop
3207 || ! regs->array[REGNO (x)].single_usage
3208 || regs->array[REGNO (x)].single_usage == const0_rtx)
3209 regs->multiple_uses = 1;
3212 /* Return nonzero if the rtx X is invariant over the current loop.
3214 The value is 2 if we refer to something only conditionally invariant.
3216 A memory ref is invariant if it is not volatile and does not conflict
3217 with anything stored in `loop_info->store_mems'. */
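/* (Editorial example: in `for (i = 0; i < n; i++) a[i] = n;' the rtx
   for N is invariant, the rtx for I is not, and the MEM for a[i] is
   rejected because its address involves I.)  */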
3219 int
3220 loop_invariant_p (loop, x)
3221 const struct loop *loop;
3222 rtx x;
3224 struct loop_info *loop_info = LOOP_INFO (loop);
3225 struct loop_regs *regs = LOOP_REGS (loop);
3226 int i;
3227 enum rtx_code code;
3228 const char *fmt;
3229 int conditional = 0;
3230 rtx mem_list_entry;
3232 if (x == 0)
3233 return 1;
3234 code = GET_CODE (x);
3235 switch (code)
3237 case CONST_INT:
3238 case CONST_DOUBLE:
3239 case SYMBOL_REF:
3240 case CONST:
3241 return 1;
3243 case LABEL_REF:
3244 /* A LABEL_REF is normally invariant, however, if we are unrolling
3245 loops, and this label is inside the loop, then it isn't invariant.
3246 This is because each unrolled copy of the loop body will have
3247 a copy of this label. If this was invariant, then an insn loading
3248 the address of this label into a register might get moved outside
3249 the loop, and then each loop body would end up using the same label.
3251 We don't know the loop bounds here though, so just fail for all
3252 labels. */
3253 if (flag_unroll_loops)
3254 return 0;
3255 else
3256 return 1;
3258 case PC:
3259 case CC0:
3260 case UNSPEC_VOLATILE:
3261 return 0;
3263 case REG:
3264 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3265 since the reg might be set by initialization within the loop. */
3267 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3268 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3269 && ! current_function_has_nonlocal_goto)
3270 return 1;
3272 if (LOOP_INFO (loop)->has_call
3273 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3274 return 0;
3276 /* Out-of-range regs can occur when we are called from unrolling.
3277 These have always been created by the unroller and are set in
3278 the loop, hence are never invariant. */
3280 if (REGNO (x) >= regs->num)
3281 return 0;
3283 if (regs->array[REGNO (x)].set_in_loop < 0)
3284 return 2;
3286 return regs->array[REGNO (x)].set_in_loop == 0;
3288 case MEM:
3289 /* Volatile memory references must be rejected. Do this before
3290 checking for read-only items, so that volatile read-only items
3291 will be rejected also. */
3292 if (MEM_VOLATILE_P (x))
3293 return 0;
3295 /* See if there is any dependence between a store and this load. */
3296 mem_list_entry = loop_info->store_mems;
3297 while (mem_list_entry)
3299 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3300 x, rtx_varies_p))
3301 return 0;
3303 mem_list_entry = XEXP (mem_list_entry, 1);
3306 /* It's not invalidated by a store in memory
3307 but we must still verify the address is invariant. */
3308 break;
3310 case ASM_OPERANDS:
3311 /* Don't mess with insns declared volatile. */
3312 if (MEM_VOLATILE_P (x))
3313 return 0;
3314 break;
3316 default:
3317 break;
3320 fmt = GET_RTX_FORMAT (code);
3321 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3323 if (fmt[i] == 'e')
3325 int tem = loop_invariant_p (loop, XEXP (x, i));
3326 if (tem == 0)
3327 return 0;
3328 if (tem == 2)
3329 conditional = 1;
3331 else if (fmt[i] == 'E')
3333 int j;
3334 for (j = 0; j < XVECLEN (x, i); j++)
3336 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3337 if (tem == 0)
3338 return 0;
3339 if (tem == 2)
3340 conditional = 1;
3346 return 1 + conditional;
3349 /* Return nonzero if all the insns in the loop that set REG
3350 are INSN and the immediately following insns,
3351 and if each of those insns sets REG in an invariant way
3352 (not counting uses of REG in them).
3354 The value is 2 if some of these insns are only conditionally invariant.
3356 We assume that INSN itself is the first set of REG
3357 and that its source is invariant. */
3359 static int
3360 consec_sets_invariant_p (loop, reg, n_sets, insn)
3361 const struct loop *loop;
3362 int n_sets;
3363 rtx reg, insn;
3365 struct loop_regs *regs = LOOP_REGS (loop);
3366 rtx p = insn;
3367 unsigned int regno = REGNO (reg);
3368 rtx temp;
3369 /* Number of sets we have to insist on finding after INSN. */
3370 int count = n_sets - 1;
3371 int old = regs->array[regno].set_in_loop;
3372 int value = 0;
3373 int this;
3375 /* If N_SETS hit the limit, we can't rely on its value. */
3376 if (n_sets == 127)
3377 return 0;
3379 regs->array[regno].set_in_loop = 0;
3381 while (count > 0)
3383 enum rtx_code code;
3384 rtx set;
3386 p = NEXT_INSN (p);
3387 code = GET_CODE (p);
3389 /* If library call, skip to end of it. */
3390 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3391 p = XEXP (temp, 0);
3393 this = 0;
3394 if (code == INSN
3395 && (set = single_set (p))
3396 && GET_CODE (SET_DEST (set)) == REG
3397 && REGNO (SET_DEST (set)) == regno)
3399 this = loop_invariant_p (loop, SET_SRC (set));
3400 if (this != 0)
3401 value |= this;
3402 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3404 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3405 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3406 notes are OK. */
3407 this = (CONSTANT_P (XEXP (temp, 0))
3408 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3409 && loop_invariant_p (loop, XEXP (temp, 0))));
3410 if (this != 0)
3411 value |= this;
3414 if (this != 0)
3415 count--;
3416 else if (code != NOTE)
3418 regs->array[regno].set_in_loop = old;
3419 return 0;
3423 regs->array[regno].set_in_loop = old;
3424 /* If loop_invariant_p ever returned 2, report conditional invariance. */
3425 return 1 + (value & 2);
3428 #if 0
3429 /* I don't think this condition is sufficient to allow INSN
3430 to be moved, so we no longer test it. */
3432 /* Return 1 if all insns in the basic block of INSN and following INSN
3433 that set REG are invariant according to TABLE. */
3435 static int
3436 all_sets_invariant_p (reg, insn, table)
3437 rtx reg, insn;
3438 short *table;
3440 rtx p = insn;
3441 int regno = REGNO (reg);
3443 while (1)
3445 enum rtx_code code;
3446 p = NEXT_INSN (p);
3447 code = GET_CODE (p);
3448 if (code == CODE_LABEL || code == JUMP_INSN)
3449 return 1;
3450 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3451 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3452 && REGNO (SET_DEST (PATTERN (p))) == regno)
3454 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3455 return 0;
3459 #endif /* 0 */
3461 /* Look at all uses (not sets) of registers in X. For each, if it is
3462 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3463 a different insn, set USAGE[REGNO] to const0_rtx. */
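/* (Editorial example: after scanning, a pseudo used in exactly one
   insn I has single_usage == I; one used in two different insns has
   single_usage == const0_rtx, meaning "more than one use".)  */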
3465 static void
3466 find_single_use_in_loop (regs, insn, x)
3467 struct loop_regs *regs;
3468 rtx insn;
3469 rtx x;
3471 enum rtx_code code = GET_CODE (x);
3472 const char *fmt = GET_RTX_FORMAT (code);
3473 int i, j;
3475 if (code == REG)
3476 regs->array[REGNO (x)].single_usage
3477 = (regs->array[REGNO (x)].single_usage != 0
3478 && regs->array[REGNO (x)].single_usage != insn)
3479 ? const0_rtx : insn;
3481 else if (code == SET)
3483 /* Don't count SET_DEST if it is a REG; otherwise count things
3484 in SET_DEST because if a register is partially modified, it won't
3485 show up as a potential movable, so we don't care how USAGE is set
3486 for it. */
3487 if (GET_CODE (SET_DEST (x)) != REG)
3488 find_single_use_in_loop (regs, insn, SET_DEST (x));
3489 find_single_use_in_loop (regs, insn, SET_SRC (x));
3491 else
3492 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3494 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3495 find_single_use_in_loop (regs, insn, XEXP (x, i));
3496 else if (fmt[i] == 'E')
3497 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3498 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3502 /* Count and record any set in X which is contained in INSN. Update
3503 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3504 in X. */
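/* (Editorial example: a pseudo set twice in the loop ends up with
   set_in_loop == 2; if the two sets fall in different basic blocks,
   or a use falls between them, may_not_optimize is set for it.)  */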
3506 static void
3507 count_one_set (regs, insn, x, last_set)
3508 struct loop_regs *regs;
3509 rtx insn, x;
3510 rtx *last_set;
3512 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3513 /* Don't move a reg that has an explicit clobber.
3514 It's not worth the pain to try to do it correctly. */
3515 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3517 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3519 rtx dest = SET_DEST (x);
3520 while (GET_CODE (dest) == SUBREG
3521 || GET_CODE (dest) == ZERO_EXTRACT
3522 || GET_CODE (dest) == SIGN_EXTRACT
3523 || GET_CODE (dest) == STRICT_LOW_PART)
3524 dest = XEXP (dest, 0);
3525 if (GET_CODE (dest) == REG)
3527 int i;
3528 int regno = REGNO (dest);
3529 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3531 /* If this is the first setting of this reg
3532 in current basic block, and it was set before,
3533 it must be set in two basic blocks, so it cannot
3534 be moved out of the loop. */
3535 if (regs->array[regno].set_in_loop > 0
3536 && last_set[regno] == 0)
3537 regs->array[regno+i].may_not_optimize = 1;
3538 /* If this is not first setting in current basic block,
3539 see if reg was used in between previous one and this.
3540 If so, neither one can be moved. */
3541 if (last_set[regno] != 0
3542 && reg_used_between_p (dest, last_set[regno], insn))
3543 regs->array[regno+i].may_not_optimize = 1;
3544 if (regs->array[regno+i].set_in_loop < 127)
3545 ++regs->array[regno+i].set_in_loop;
3546 last_set[regno+i] = insn;
3552 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3553 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3554 contained in insn INSN is used by any insn that precedes INSN in
3555 cyclic order starting from the loop entry point.
3557 We don't want to use INSN_LUID here because if we restrict INSN to those
3558 that have a valid INSN_LUID, it means we cannot move an invariant out
3559 from an inner loop past two loops. */
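/* (Editorial note: "precedes INSN" is meant in the cyclic order
   scan_start, ..., end, start, ..., so a use that is textually after
   INSN but reached earlier on entry to the loop still counts.)  */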
3561 static int
3562 loop_reg_used_before_p (loop, set, insn)
3563 const struct loop *loop;
3564 rtx set, insn;
3566 rtx reg = SET_DEST (set);
3567 rtx p;
3569 /* Scan forward checking for register usage. If we hit INSN, we
3570 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3571 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3573 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3574 return 1;
3576 if (p == loop->end)
3577 p = loop->start;
3580 return 0;
3584 /* Information we collect about arrays that we might want to prefetch. */
3585 struct prefetch_info
3587 struct iv_class *class; /* Class this prefetch is based on. */
3588 struct induction *giv; /* GIV this prefetch is based on. */
3589 rtx base_address; /* Start prefetching from this address plus
3590 index. */
3591 HOST_WIDE_INT index;
3592 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3593 iteration. */
3594 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3595 prefetch area in one iteration. */
3596 unsigned int total_bytes; /* Total bytes loop will access in this block.
3597 This is set only for loops with known
3598 iteration counts and is 0xffffffff
3599 otherwise. */
3600 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3601 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3602 unsigned int write : 1; /* 1 for read/write prefetches. */
3605 /* Data used by check_store function. */
3606 struct check_store_data
3608 rtx mem_address;
3609 int mem_write;
3612 static void check_store PARAMS ((rtx, rtx, void *));
3613 static void emit_prefetch_instructions PARAMS ((struct loop *));
3614 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3616 /* Set mem_write when mem_address is found. Used as callback to
3617 note_stores. */
3618 static void
3619 check_store (x, pat, data)
3620 rtx x, pat ATTRIBUTE_UNUSED;
3621 void *data;
3623 struct check_store_data *d = (struct check_store_data *) data;
3625 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3626 d->mem_write = 1;
3629 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3630 important to get some addresses combined. Later more sophisticated
3631 transformations can be added when necessary.
3633 ??? The same trick of swapping operands is done in several other
3634 places. It would be nice to develop some common way to handle this. */
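/* (Editorial example: (plus (reg 100) (const_int 8)) and
   (plus (const_int 8) (reg 100)) compare equal here because PLUS is
   commutative, whereas rtx_equal_p would treat them as distinct.)  */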
3636 static int
3637 rtx_equal_for_prefetch_p (x, y)
3638 rtx x, y;
3640 int i;
3641 int j;
3642 enum rtx_code code = GET_CODE (x);
3643 const char *fmt;
3645 if (x == y)
3646 return 1;
3647 if (code != GET_CODE (y))
3648 return 0;
3650 code = GET_CODE (x);
3652 if (GET_RTX_CLASS (code) == 'c')
3654 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3655 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3656 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3657 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3659 /* Compare the elements. If any pair of corresponding elements fails to
3660 match, return 0 for the whole thing. */
3662 fmt = GET_RTX_FORMAT (code);
3663 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3665 switch (fmt[i])
3667 case 'w':
3668 if (XWINT (x, i) != XWINT (y, i))
3669 return 0;
3670 break;
3672 case 'i':
3673 if (XINT (x, i) != XINT (y, i))
3674 return 0;
3675 break;
3677 case 'E':
3678 /* Two vectors must have the same length. */
3679 if (XVECLEN (x, i) != XVECLEN (y, i))
3680 return 0;
3682 /* And the corresponding elements must match. */
3683 for (j = 0; j < XVECLEN (x, i); j++)
3684 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3685 XVECEXP (y, i, j)) == 0)
3686 return 0;
3687 break;
3689 case 'e':
3690 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3691 return 0;
3692 break;
3694 case 's':
3695 if (strcmp (XSTR (x, i), XSTR (y, i)))
3696 return 0;
3697 break;
3699 case 'u':
3700 /* These are just backpointers, so they don't matter. */
3701 break;
3703 case '0':
3704 break;
3706 /* It is believed that rtx's at this level will never
3707 contain anything but integers and other rtx's,
3708 except for within LABEL_REFs and SYMBOL_REFs. */
3709 default:
3710 abort ();
3713 return 1;
3716 /* Remove the constant addition value from the expression X (when present)
3717 and return it. */
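/* For example, if *X is (plus (reg 100) (const_int 16)) the call
   returns 16 and rewrites *X to (reg 100); for
   (const (plus (symbol_ref "a") (const_int 8))) it returns 8 and
   leaves the SYMBOL_REF in *X.  The register number and symbol name
   are illustrative only.  */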
3719 static HOST_WIDE_INT
3720 remove_constant_addition (x)
3721 rtx *x;
3723 HOST_WIDE_INT addval = 0;
3724 rtx exp = *x;
3726 /* Avoid clobbering a shared CONST expression. */
3727 if (GET_CODE (exp) == CONST)
3729 if (GET_CODE (XEXP (exp, 0)) == PLUS
3730 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3731 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3733 *x = XEXP (XEXP (exp, 0), 0);
3734 return INTVAL (XEXP (XEXP (exp, 0), 1));
3736 return 0;
3739 if (GET_CODE (exp) == CONST_INT)
3741 addval = INTVAL (exp);
3742 *x = const0_rtx;
3745 /* For plus expression recurse on ourself. */
3746 else if (GET_CODE (exp) == PLUS)
3748 addval += remove_constant_addition (&XEXP (exp, 0));
3749 addval += remove_constant_addition (&XEXP (exp, 1));
3751 /* If one of the operands was constant, remove the resulting extra
3752 zero from the expression. */
3753 if (XEXP (exp, 0) == const0_rtx)
3754 *x = XEXP (exp, 1);
3755 else if (XEXP (exp, 1) == const0_rtx)
3756 *x = XEXP (exp, 0);
3759 return addval;
3762 /* Attempt to identify accesses to arrays that are most likely to cause cache
3763 misses, and emit prefetch instructions a few prefetch blocks forward.
3765 To detect the arrays we use the GIV information that was collected by the
3766 strength reduction pass.
3768 The prefetch instructions are generated after the GIV information has been
3769 collected and before the strength reduction process. The new GIVs are
3770 injected into the strength reduction tables, so the prefetch addresses are
3771 optimized as well.
3773 GIVs are split into base address, stride, and constant addition values.
3774 GIVs with the same base address and stride, and with close addition values,
3775 are combined into a single prefetch. Writes through GIVs are also detected,
3776 so that prefetch-for-write instructions can be used for the blocks we write
3777 to, on machines that support write prefetches.
3779 Several heuristics are used to determine when to prefetch. They are
3780 controlled by defined symbols that can be overridden for each target. */
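/* As a rough illustration, consider a loop such as

       for (i = 0; i < n; i++)
         sum += a[i] + a[i + 4];

   With 4-byte elements both accesses decompose to base address `a',
   stride 4 (mult_val times the biv increment) and constant indexes
   that differ by 16 bytes.  Since that difference is well below
   PREFETCH_EXTREME_DIFFERENCE on typical targets, the two accesses
   are merged into a single prefetch stream below.  */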
3782 static void
3783 emit_prefetch_instructions (loop)
3784 struct loop *loop;
3786 int num_prefetches = 0;
3787 int num_real_prefetches = 0;
3788 int num_real_write_prefetches = 0;
3789 int num_prefetches_before = 0;
3790 int num_write_prefetches_before = 0;
3791 int ahead = 0;
3792 int i;
3793 struct iv_class *bl;
3794 struct induction *iv;
3795 struct prefetch_info info[MAX_PREFETCHES];
3796 struct loop_ivs *ivs = LOOP_IVS (loop);
3798 if (!HAVE_prefetch)
3799 return;
3801 /* Consider only loops without calls. When a call is made, the loop is
3802 probably slow enough to hide the memory latency anyway. */
3803 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3805 if (loop_dump_stream)
3806 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3808 return;
3811 /* Don't prefetch in loops known to have few iterations. */
3812 if (PREFETCH_NO_LOW_LOOPCNT
3813 && LOOP_INFO (loop)->n_iterations
3814 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3816 if (loop_dump_stream)
3817 fprintf (loop_dump_stream,
3818 "Prefetch: ignoring loop: not enough iterations.\n");
3819 return;
3822 /* Search all induction variables and pick those interesting for the prefetch
3823 machinery. */
3824 for (bl = ivs->list; bl; bl = bl->next)
3826 struct induction *biv = bl->biv, *biv1;
3827 int basestride = 0;
3829 biv1 = biv;
3831 /* Expect all BIVs to be executed in each iteration. This makes our
3832 analysis more conservative. */
3833 while (biv1)
3835 /* Discard non-constant additions that we can't handle well yet, and
3836 BIVs that are executed multiple times; such BIVs ought to be
3837 handled in the nested loop. We accept not_every_iteration BIVs,
3838 since these only result in larger strides and make our
3839 heuristics more conservative. */
3840 if (GET_CODE (biv->add_val) != CONST_INT)
3842 if (loop_dump_stream)
3844 fprintf (loop_dump_stream,
3845 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3846 REGNO (biv->src_reg), INSN_UID (biv->insn));
3847 print_rtl (loop_dump_stream, biv->add_val);
3848 fprintf (loop_dump_stream, "\n");
3850 break;
3853 if (biv->maybe_multiple)
3855 if (loop_dump_stream)
3857 fprintf (loop_dump_stream,
3858 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3859 REGNO (biv->src_reg), INSN_UID (biv->insn));
3860 print_rtl (loop_dump_stream, biv->add_val);
3861 fprintf (loop_dump_stream, "\n");
3863 break;
3866 basestride += INTVAL (biv1->add_val);
3867 biv1 = biv1->next_iv;
3870 if (biv1 || !basestride)
3871 continue;
3873 for (iv = bl->giv; iv; iv = iv->next_iv)
3875 rtx address;
3876 rtx temp;
3877 HOST_WIDE_INT index = 0;
3878 int add = 1;
3879 HOST_WIDE_INT stride = 0;
3880 int stride_sign = 1;
3881 struct check_store_data d;
3882 const char *ignore_reason = NULL;
3883 int size = GET_MODE_SIZE (GET_MODE (iv));
3885 /* See whether an induction variable is interesting to us and if
3886 not, report the reason. */
3887 if (iv->giv_type != DEST_ADDR)
3888 ignore_reason = "giv is not a destination address";
3890 /* We are interested only in constant stride memory references
3891 in order to be able to compute density easily. */
3892 else if (GET_CODE (iv->mult_val) != CONST_INT)
3893 ignore_reason = "stride is not constant";
3895 else
3897 stride = INTVAL (iv->mult_val) * basestride;
3898 if (stride < 0)
3900 stride = -stride;
3901 stride_sign = -1;
3904 /* On some targets, reversed order prefetches are not
3905 worthwhile. */
3906 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3907 ignore_reason = "reversed order stride";
3909 /* Prefetch of accesses with an extreme stride might not be
3910 worthwhile, either. */
3911 else if (PREFETCH_NO_EXTREME_STRIDE
3912 && stride > PREFETCH_EXTREME_STRIDE)
3913 ignore_reason = "extreme stride";
3915 /* Ignore GIVs with varying add values; we can't predict the
3916 value for the next iteration. */
3917 else if (!loop_invariant_p (loop, iv->add_val))
3918 ignore_reason = "giv has varying add value";
3920 /* Ignore GIVs in nested loops; they ought to have been
3921 handled already. */
3922 else if (iv->maybe_multiple)
3923 ignore_reason = "giv is in nested loop";
3926 if (ignore_reason != NULL)
3928 if (loop_dump_stream)
3929 fprintf (loop_dump_stream,
3930 "Prefetch: ignoring giv at %d: %s.\n",
3931 INSN_UID (iv->insn), ignore_reason);
3932 continue;
3935 /* Determine the pointer to the basic array we are examining. It is
3936 the sum of the BIV's initial value and the GIV's add_val. */
3937 address = copy_rtx (iv->add_val);
3938 temp = copy_rtx (bl->initial_value);
3940 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3941 index = remove_constant_addition (&address);
3943 d.mem_write = 0;
3944 d.mem_address = *iv->location;
3946 /* When the GIV is not always executed, we might be better off by
3947 not dirtying the cache pages. */
3948 if (PREFETCH_CONDITIONAL || iv->always_executed)
3949 note_stores (PATTERN (iv->insn), check_store, &d);
3950 else
3952 if (loop_dump_stream)
3953 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3954 INSN_UID (iv->insn), "in conditional code.");
3955 continue;
3958 /* Attempt to find another prefetch to the same array and see if we
3959 can merge this one. */
3960 for (i = 0; i < num_prefetches; i++)
3961 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3962 && stride == info[i].stride)
3964 /* If both access the same array (the same location,
3965 differing only by a small constant index), merge
3966 the prefetches. Keep only the later one; the earlier
3967 location will be prefetched by a previous iteration.
3968 The artificial threshold should not be too small,
3969 but also no bigger than the small portion of memory
3970 usually traversed by a single loop. */
3971 if (index >= info[i].index
3972 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3974 info[i].write |= d.mem_write;
3975 info[i].bytes_accessed += size;
3976 info[i].index = index;
3977 info[i].giv = iv;
3978 info[i].class = bl;
3979 info[i].base_address = address;
3980 add = 0;
3981 break;
3984 if (index < info[i].index
3985 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3987 info[i].write |= d.mem_write;
3988 info[i].bytes_accessed += size;
3989 add = 0;
3990 break;
3994 /* Merging failed. */
3995 if (add)
3997 info[num_prefetches].giv = iv;
3998 info[num_prefetches].class = bl;
3999 info[num_prefetches].index = index;
4000 info[num_prefetches].stride = stride;
4001 info[num_prefetches].base_address = address;
4002 info[num_prefetches].write = d.mem_write;
4003 info[num_prefetches].bytes_accessed = size;
4004 num_prefetches++;
4005 if (num_prefetches >= MAX_PREFETCHES)
4007 if (loop_dump_stream)
4008 fprintf (loop_dump_stream,
4009 "Maximal number of prefetches exceeded.\n");
4010 return;
4016 for (i = 0; i < num_prefetches; i++)
4018 int density;
4020 /* Attempt to calculate the total number of bytes fetched by all
4021 iterations of the loop. Avoid overflow. */
4022 if (LOOP_INFO (loop)->n_iterations
4023 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4024 >= LOOP_INFO (loop)->n_iterations))
4025 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4026 else
4027 info[i].total_bytes = 0xffffffff;
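/* DENSITY below is the percentage of each stride actually touched:
   e.g. 4 bytes accessed per 4-byte stride gives 100, while 4 bytes
   per 32-byte stride gives only 12.  The comparison that follows
   rescales both sides, so density * 256 > PREFETCH_DENSE_MEM * 100
   asks whether the density exceeds PREFETCH_DENSE_MEM / 256 -- with
   a target value of, say, 220 that is roughly 86%.  */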
4029 density = info[i].bytes_accessed * 100 / info[i].stride;
4031 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4032 if (PREFETCH_ONLY_DENSE_MEM)
4033 if (density * 256 > PREFETCH_DENSE_MEM * 100
4034 && (info[i].total_bytes / PREFETCH_BLOCK
4035 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4037 info[i].prefetch_before_loop = 1;
4038 info[i].prefetch_in_loop
4039 = (info[i].total_bytes / PREFETCH_BLOCK
4040 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4042 else
4044 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4045 if (loop_dump_stream)
4046 fprintf (loop_dump_stream,
4047 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4048 INSN_UID (info[i].giv->insn), density);
4050 else
4051 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4053 /* Find how many prefetch instructions we'll use within the loop. */
4054 if (info[i].prefetch_in_loop != 0)
4056 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4057 / PREFETCH_BLOCK);
4058 num_real_prefetches += info[i].prefetch_in_loop;
4059 if (info[i].write)
4060 num_real_write_prefetches += info[i].prefetch_in_loop;
4064 /* Determine how many iterations ahead to prefetch within the loop, based
4065 on how many prefetches we currently expect to do within the loop. */
4066 if (num_real_prefetches != 0)
4068 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4070 if (loop_dump_stream)
4071 fprintf (loop_dump_stream,
4072 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4073 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4074 num_real_prefetches = 0, num_real_write_prefetches = 0;
4077 /* We'll also use AHEAD to determine how many prefetch instructions to
4078 emit before a loop, so don't leave it zero. */
4079 if (ahead == 0)
4080 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
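/* For example, with the default SIMULTANEOUS_PREFETCHES of 3 and two
   prefetch insns in the loop body, AHEAD is 1 and each in-loop
   prefetch below targets data PREFETCH_BLOCK * (1 + y) bytes past the
   current access.  If nothing is prefetched within the loop, AHEAD
   only controls how far the insns emitted before the loop reach.  */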
4082 for (i = 0; i < num_prefetches; i++)
4084 /* Update if we've decided not to prefetch anything within the loop. */
4085 if (num_real_prefetches == 0)
4086 info[i].prefetch_in_loop = 0;
4088 /* Find how many prefetch instructions we'll use before the loop. */
4089 if (info[i].prefetch_before_loop != 0)
4091 int n = info[i].total_bytes / PREFETCH_BLOCK;
4092 if (n > ahead)
4093 n = ahead;
4094 info[i].prefetch_before_loop = n;
4095 num_prefetches_before += n;
4096 if (info[i].write)
4097 num_write_prefetches_before += n;
4100 if (loop_dump_stream)
4102 if (info[i].prefetch_in_loop == 0
4103 && info[i].prefetch_before_loop == 0)
4104 continue;
4105 fprintf (loop_dump_stream, "Prefetch insn: %d",
4106 INSN_UID (info[i].giv->insn));
4107 fprintf (loop_dump_stream,
4108 "; in loop: %d; before: %d; %s\n",
4109 info[i].prefetch_in_loop,
4110 info[i].prefetch_before_loop,
4111 info[i].write ? "read/write" : "read only");
4112 fprintf (loop_dump_stream,
4113 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4114 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4115 info[i].bytes_accessed, info[i].total_bytes);
4116 fprintf (loop_dump_stream, " index: ");
4117 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4118 fprintf (loop_dump_stream, "; stride: ");
4119 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4120 fprintf (loop_dump_stream, "; address: ");
4121 print_rtl (loop_dump_stream, info[i].base_address);
4122 fprintf (loop_dump_stream, "\n");
4126 if (num_real_prefetches + num_prefetches_before > 0)
4128 /* Record that this loop uses prefetch instructions. */
4129 LOOP_INFO (loop)->has_prefetch = 1;
4131 if (loop_dump_stream)
4133 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4134 num_real_prefetches, num_real_write_prefetches);
4135 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4136 num_prefetches_before, num_write_prefetches_before);
4140 for (i = 0; i < num_prefetches; i++)
4142 int y;
4144 for (y = 0; y < info[i].prefetch_in_loop; y++)
4146 rtx loc = copy_rtx (*info[i].giv->location);
4147 rtx insn;
4148 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4149 rtx before_insn = info[i].giv->insn;
4150 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4151 rtx seq;
4153 /* We can save some effort by offsetting the address on
4154 architectures with offsettable memory references. */
4155 if (offsettable_address_p (0, VOIDmode, loc))
4156 loc = plus_constant (loc, bytes_ahead);
4157 else
4159 rtx reg = gen_reg_rtx (Pmode);
4160 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4161 GEN_INT (bytes_ahead), reg,
4162 0, before_insn);
4163 loc = reg;
4166 start_sequence ();
4167 /* Make sure the address operand is valid for prefetch. */
4168 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4169 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4170 loc = force_reg (Pmode, loc);
4171 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4172 GEN_INT (3)));
4173 seq = get_insns ();
4174 end_sequence ();
4175 emit_insn_before (seq, before_insn);
4177 /* Check all insns emitted and record the new GIV
4178 information. */
4179 insn = NEXT_INSN (prev_insn);
4180 while (insn != before_insn)
4182 insn = check_insn_for_givs (loop, insn,
4183 info[i].giv->always_executed,
4184 info[i].giv->maybe_multiple);
4185 insn = NEXT_INSN (insn);
4189 if (PREFETCH_BEFORE_LOOP)
4191 /* Emit insns before the loop to fetch the first cache lines or,
4192 if we're not prefetching within the loop, everything we expect
4193 to need. */
4194 for (y = 0; y < info[i].prefetch_before_loop; y++)
4196 rtx reg = gen_reg_rtx (Pmode);
4197 rtx loop_start = loop->start;
4198 rtx init_val = info[i].class->initial_value;
4199 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4200 info[i].giv->add_val,
4201 GEN_INT (y * PREFETCH_BLOCK));
4203 /* Functions called by LOOP_IV_ADD_MULT_EMIT_BEFORE expect a
4204 non-constant INIT_VAL to have the same mode as REG, which
4205 in this case we know to be Pmode. */
4206 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4207 init_val = convert_to_mode (Pmode, init_val, 0);
4208 loop_iv_add_mult_emit_before (loop, init_val,
4209 info[i].giv->mult_val,
4210 add_val, reg, 0, loop_start);
4211 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4212 GEN_INT (3)),
4213 loop_start);
4218 return;
4221 /* A "basic induction variable" or biv is a pseudo reg that is set
4222 (within this loop) only by incrementing or decrementing it. */
4223 /* A "general induction variable" or giv is a pseudo reg whose
4224 value is a linear function of a biv. */
4226 /* Bivs are recognized by `basic_induction_var';
4227 Givs by `general_induction_var'. */
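/* For example, in

       for (i = 0; i < n; i++)
         x[4 * i + 2] = 0;

   the counter `i' is a biv, being incremented by a constant on each
   iteration, while the value 4 * i + 2 used to address `x' is a giv
   of `i' with mult_val 4 and add_val 2.  */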
4229 /* Communication with routines called via `note_stores'. */
4231 static rtx note_insn;
4233 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4235 static rtx addr_placeholder;
4237 /* ??? Unfinished optimizations, and possible future optimizations,
4238 for the strength reduction code. */
4240 /* ??? The interaction of biv elimination and the recognition of 'constant'
4241 bivs may cause problems. */
4243 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4244 performance problems.
4246 Perhaps don't eliminate things that can be combined with an addressing
4247 mode. Find all givs that have the same biv, mult_val, and add_val;
4248 then for each giv, check to see if its only use dies in a following
4249 memory address. If so, generate a new memory address and check to see
4250 if it is valid. If it is valid, then store the modified memory address,
4251 otherwise, mark the giv as not done so that it will get its own iv. */
4253 /* ??? Could try to optimize branches when it is known that a biv is always
4254 positive. */
4256 /* ??? When replacing a biv in a compare insn, we should replace it with the
4257 closest giv so that an optimized branch can still be recognized by the
4258 combiner, e.g. the VAX acb insn. */
4260 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4261 was rerun in loop_optimize whenever a register was added or moved.
4262 Also, some of the optimizations could be a little less conservative. */
4264 /* Scan the loop body and call FNCALL for each insn. In addition to the
4265 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4266 the callback.
4268 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4269 least once for every loop iteration except for the last one.
4271 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4272 loop iteration. */
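/* For instance, loop_bivs_find and loop_givs_find below are simply

       for_each_insn_in_loop (loop, check_insn_for_bivs);
       for_each_insn_in_loop (loop, check_insn_for_givs);  */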
4274 void
4275 for_each_insn_in_loop (loop, fncall)
4276 struct loop *loop;
4277 loop_insn_callback fncall;
4279 int not_every_iteration = 0;
4280 int maybe_multiple = 0;
4281 int past_loop_latch = 0;
4282 int loop_depth = 0;
4283 rtx p;
4285 /* If loop_scan_start points to the loop exit test, we have to be wary of
4286 subversive use of gotos inside expression statements. */
4287 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4288 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4290 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4291 for (p = next_insn_in_loop (loop, loop->scan_start);
4292 p != NULL_RTX;
4293 p = next_insn_in_loop (loop, p))
4295 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4297 /* Past CODE_LABEL, we get to insns that may be executed multiple
4298 times. The only way we can be sure that they can't is if every
4299 jump insn between here and the end of the loop either
4300 returns, exits the loop, is a jump to a location that is still
4301 behind the label, or is a jump to the loop start. */
4303 if (GET_CODE (p) == CODE_LABEL)
4305 rtx insn = p;
4307 maybe_multiple = 0;
4309 while (1)
4311 insn = NEXT_INSN (insn);
4312 if (insn == loop->scan_start)
4313 break;
4314 if (insn == loop->end)
4316 if (loop->top != 0)
4317 insn = loop->top;
4318 else
4319 break;
4320 if (insn == loop->scan_start)
4321 break;
4324 if (GET_CODE (insn) == JUMP_INSN
4325 && GET_CODE (PATTERN (insn)) != RETURN
4326 && (!any_condjump_p (insn)
4327 || (JUMP_LABEL (insn) != 0
4328 && JUMP_LABEL (insn) != loop->scan_start
4329 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4331 maybe_multiple = 1;
4332 break;
4337 /* Past a jump, we get to insns for which we can't count
4338 on whether they will be executed during each iteration. */
4339 /* This code appears twice in strength_reduce. There is also similar
4340 code in scan_loop. */
4341 if (GET_CODE (p) == JUMP_INSN
4342 /* If we enter the loop in the middle, and scan around to the
4343 beginning, don't set not_every_iteration for that.
4344 This can be any kind of jump, since we want to know if insns
4345 will be executed if the loop is executed. */
4346 && !(JUMP_LABEL (p) == loop->top
4347 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4348 && any_uncondjump_p (p))
4349 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4351 rtx label = 0;
4353 /* If this is a jump outside the loop, then it also doesn't
4354 matter. Check to see if the target of this branch is on the
4355 loop->exit_labels list. */
4357 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4358 if (XEXP (label, 0) == JUMP_LABEL (p))
4359 break;
4361 if (!label)
4362 not_every_iteration = 1;
4365 else if (GET_CODE (p) == NOTE)
4367 /* At the virtual top of a converted loop, insns are again known to
4368 be executed each iteration: logically, the loop begins here
4369 even though the exit code has been duplicated.
4371 Insns are also again known to be executed each iteration at
4372 the LOOP_CONT note. */
4373 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4374 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4375 && loop_depth == 0)
4376 not_every_iteration = 0;
4377 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4378 loop_depth++;
4379 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4380 loop_depth--;
4383 /* Note if we pass a loop latch. If we do, then we cannot clear
4384 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4385 a loop since a jump before the last CODE_LABEL may have started
4386 a new loop iteration.
4388 Note that LOOP_TOP is only set for rotated loops and we need
4389 this check for all loops, so compare against the CODE_LABEL
4390 which immediately follows LOOP_START. */
4391 if (GET_CODE (p) == JUMP_INSN
4392 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4393 past_loop_latch = 1;
4395 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4396 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4397 or not an insn is known to be executed each iteration of the
4398 loop, whether or not any iterations are known to occur.
4400 Therefore, if we have just passed a label and have no more labels
4401 between here and the test insn of the loop, and we have not passed
4402 a jump to the top of the loop, then we know these insns will be
4403 executed each iteration. */
4405 if (not_every_iteration
4406 && !past_loop_latch
4407 && GET_CODE (p) == CODE_LABEL
4408 && no_labels_between_p (p, loop->end)
4409 && loop_insn_first_p (p, loop->cont))
4410 not_every_iteration = 0;
4414 static void
4415 loop_bivs_find (loop)
4416 struct loop *loop;
4418 struct loop_regs *regs = LOOP_REGS (loop);
4419 struct loop_ivs *ivs = LOOP_IVS (loop);
4420 /* Temporary list pointers for traversing ivs->list. */
4421 struct iv_class *bl, **backbl;
4423 ivs->list = 0;
4425 for_each_insn_in_loop (loop, check_insn_for_bivs);
4427 /* Scan ivs->list to remove all regs that proved not to be bivs.
4428 Make a sanity check against regs->n_times_set. */
4429 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4431 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4432 /* The above happens if the register is modified by a subreg, etc. */
4433 /* Make sure it is not recognized as a basic induction var: */
4434 || regs->array[bl->regno].n_times_set != bl->biv_count
4435 /* If it was never incremented, it is an invariant that we decided
4436 not to move, so leave it alone. */
4437 || ! bl->incremented)
4439 if (loop_dump_stream)
4440 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4441 bl->regno,
4442 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4443 ? "not induction variable"
4444 : (! bl->incremented ? "never incremented"
4445 : "count error")));
4447 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4448 *backbl = bl->next;
4450 else
4452 backbl = &bl->next;
4454 if (loop_dump_stream)
4455 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4461 /* Determine how BIVs are initialized by looking through the pre-header's
4462 extended basic block. */
4463 static void
4464 loop_bivs_init_find (loop)
4465 struct loop *loop;
4467 struct loop_ivs *ivs = LOOP_IVS (loop);
4468 /* Temporary list pointers for traversing ivs->list. */
4469 struct iv_class *bl;
4470 int call_seen;
4471 rtx p;
4473 /* Find initial value for each biv by searching backwards from loop_start,
4474 halting at first label. Also record any test condition. */
4476 call_seen = 0;
4477 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4479 rtx test;
4481 note_insn = p;
4483 if (GET_CODE (p) == CALL_INSN)
4484 call_seen = 1;
4486 if (INSN_P (p))
4487 note_stores (PATTERN (p), record_initial, ivs);
4489 /* Record any test of a biv that branches around the loop if there is no
4490 store between it and the start of the loop. We only care about tests with
4491 constants and registers and only certain of those. */
4492 if (GET_CODE (p) == JUMP_INSN
4493 && JUMP_LABEL (p) != 0
4494 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4495 && (test = get_condition_for_loop (loop, p)) != 0
4496 && GET_CODE (XEXP (test, 0)) == REG
4497 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4498 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4499 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4500 && bl->init_insn == 0)
4502 /* If an NE test, we have an initial value! */
4503 if (GET_CODE (test) == NE)
4505 bl->init_insn = p;
4506 bl->init_set = gen_rtx_SET (VOIDmode,
4507 XEXP (test, 0), XEXP (test, 1));
4509 else
4510 bl->initial_test = test;
4516 /* Look at each biv and see if we can say anything better about its
4517 initial value from any initializing insns set up above. (This is done
4518 in two passes to avoid missing SETs in a PARALLEL.) */
4519 static void
4520 loop_bivs_check (loop)
4521 struct loop *loop;
4523 struct loop_ivs *ivs = LOOP_IVS (loop);
4524 /* Temporary list pointers for traversing ivs->list. */
4525 struct iv_class *bl;
4526 struct iv_class **backbl;
4528 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4530 rtx src;
4531 rtx note;
4533 if (! bl->init_insn)
4534 continue;
4536 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4537 is a constant, use the value of that. */
4538 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4539 && CONSTANT_P (XEXP (note, 0)))
4540 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4541 && CONSTANT_P (XEXP (note, 0))))
4542 src = XEXP (note, 0);
4543 else
4544 src = SET_SRC (bl->init_set);
4546 if (loop_dump_stream)
4547 fprintf (loop_dump_stream,
4548 "Biv %d: initialized at insn %d: initial value ",
4549 bl->regno, INSN_UID (bl->init_insn));
4551 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4552 || GET_MODE (src) == VOIDmode)
4553 && valid_initial_value_p (src, bl->init_insn,
4554 LOOP_INFO (loop)->pre_header_has_call,
4555 loop->start))
4557 bl->initial_value = src;
4559 if (loop_dump_stream)
4561 print_simple_rtl (loop_dump_stream, src);
4562 fputc ('\n', loop_dump_stream);
4565 /* If we can't make it a giv,
4566 let the biv keep its initial value of "itself". */
4567 else if (loop_dump_stream)
4568 fprintf (loop_dump_stream, "is complex\n");
4573 /* Search the loop for general induction variables. */
4575 static void
4576 loop_givs_find (loop)
4577 struct loop* loop;
4579 for_each_insn_in_loop (loop, check_insn_for_givs);
4583 /* For each giv for which we still don't know whether or not it is
4584 replaceable, check to see if it is replaceable because its final value
4585 can be calculated. */
4587 static void
4588 loop_givs_check (loop)
4589 struct loop *loop;
4591 struct loop_ivs *ivs = LOOP_IVS (loop);
4592 struct iv_class *bl;
4594 for (bl = ivs->list; bl; bl = bl->next)
4596 struct induction *v;
4598 for (v = bl->giv; v; v = v->next_iv)
4599 if (! v->replaceable && ! v->not_replaceable)
4600 check_final_value (loop, v);
4605 /* Return nonzero if it is possible to eliminate the biv BL provided
4606 all givs are reduced. This is possible if either the reg is not
4607 used outside the loop, or we can compute what its final value will
4608 be. */
4610 static int
4611 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4612 struct loop *loop;
4613 struct iv_class *bl;
4614 int threshold;
4615 int insn_count;
4617 /* For architectures with a decrement_and_branch_until_zero insn,
4618 don't do this if we put a REG_NONNEG note on the endtest for this
4619 biv. */
4621 #ifdef HAVE_decrement_and_branch_until_zero
4622 if (bl->nonneg)
4624 if (loop_dump_stream)
4625 fprintf (loop_dump_stream,
4626 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4627 return 0;
4629 #endif
4631 /* Check that the biv is not used outside the loop, or that it has a
4632 computable final value. Compare against bl->init_insn rather than
4633 loop->start. We aren't concerned with any uses of the biv between
4634 init_insn and loop->start since these won't be affected by the value
4635 of the biv elsewhere in the function, so long as init_insn doesn't use
4636 the biv itself. */
4638 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4639 && bl->init_insn
4640 && INSN_UID (bl->init_insn) < max_uid_for_loop
4641 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4642 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4643 || (bl->final_value = final_biv_value (loop, bl)))
4644 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4646 if (loop_dump_stream)
4648 fprintf (loop_dump_stream,
4649 "Cannot eliminate biv %d.\n",
4650 bl->regno);
4651 fprintf (loop_dump_stream,
4652 "First use: insn %d, last use: insn %d.\n",
4653 REGNO_FIRST_UID (bl->regno),
4654 REGNO_LAST_UID (bl->regno));
4656 return 0;
4660 /* Reduce each giv of BL that we have decided to reduce. */
4662 static void
4663 loop_givs_reduce (loop, bl)
4664 struct loop *loop;
4665 struct iv_class *bl;
4667 struct induction *v;
4669 for (v = bl->giv; v; v = v->next_iv)
4671 struct induction *tv;
4672 if (! v->ignore && v->same == 0)
4674 int auto_inc_opt = 0;
4676 /* If the code for derived givs immediately below has already
4677 allocated a new_reg, we must keep it. */
4678 if (! v->new_reg)
4679 v->new_reg = gen_reg_rtx (v->mode);
4681 #ifdef AUTO_INC_DEC
4682 /* If the target has auto-increment addressing modes, and
4683 this is an address giv, then try to put the increment
4684 immediately after its use, so that flow can create an
4685 auto-increment addressing mode. */
4686 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4687 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4688 /* We don't handle reversed biv's because bl->biv->insn
4689 does not have a valid INSN_LUID. */
4690 && ! bl->reversed
4691 && v->always_executed && ! v->maybe_multiple
4692 && INSN_UID (v->insn) < max_uid_for_loop)
4694 /* If other giv's have been combined with this one, then
4695 this will work only if all uses of the other giv's occur
4696 before this giv's insn. This is difficult to check.
4698 We simplify this by looking for the common case where
4699 there is one DEST_REG giv, and this giv's insn is the
4700 last use of the dest_reg of that DEST_REG giv. If the
4701 increment occurs after the address giv, then we can
4702 perform the optimization. (Otherwise, the increment
4703 would have to go before other_giv, and we would not be
4704 able to combine it with the address giv to get an
4705 auto-inc address.) */
4706 if (v->combined_with)
4708 struct induction *other_giv = 0;
4710 for (tv = bl->giv; tv; tv = tv->next_iv)
4711 if (tv->same == v)
4713 if (other_giv)
4714 break;
4715 else
4716 other_giv = tv;
4718 if (! tv && other_giv
4719 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4720 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4721 == INSN_UID (v->insn))
4722 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4723 auto_inc_opt = 1;
4725 /* Check for case where increment is before the address
4726 giv. Do this test in "loop order". */
4727 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4728 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4729 || (INSN_LUID (bl->biv->insn)
4730 > INSN_LUID (loop->scan_start))))
4731 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4732 && (INSN_LUID (loop->scan_start)
4733 < INSN_LUID (bl->biv->insn))))
4734 auto_inc_opt = -1;
4735 else
4736 auto_inc_opt = 1;
4738 #ifdef HAVE_cc0
4740 rtx prev;
4742 /* We can't put an insn immediately after one setting
4743 cc0, or immediately before one using cc0. */
4744 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4745 || (auto_inc_opt == -1
4746 && (prev = prev_nonnote_insn (v->insn)) != 0
4747 && INSN_P (prev)
4748 && sets_cc0_p (PATTERN (prev))))
4749 auto_inc_opt = 0;
4751 #endif
4753 if (auto_inc_opt)
4754 v->auto_inc_opt = 1;
4756 #endif
4758 /* For each place where the biv is incremented, add an insn
4759 to increment the new, reduced reg for the giv. */
4760 for (tv = bl->biv; tv; tv = tv->next_iv)
4762 rtx insert_before;
4764 if (! auto_inc_opt)
4765 insert_before = NEXT_INSN (tv->insn);
4766 else if (auto_inc_opt == 1)
4767 insert_before = NEXT_INSN (v->insn);
4768 else
4769 insert_before = v->insn;
4771 if (tv->mult_val == const1_rtx)
4772 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4773 v->new_reg, v->new_reg,
4774 0, insert_before);
4775 else /* tv->mult_val == const0_rtx */
4776 /* A multiply is acceptable here
4777 since this is presumed to be seldom executed. */
4778 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4779 v->add_val, v->new_reg,
4780 0, insert_before);
4783 /* Add code at loop start to initialize giv's reduced reg. */
4785 loop_iv_add_mult_hoist (loop,
4786 extend_value_for_giv (v, bl->initial_value),
4787 v->mult_val, v->add_val, v->new_reg);
4793 /* Check for givs whose first use is their definition and whose
4794 last use is the definition of another giv. If so, it is likely
4795 dead and should not be used to derive another giv nor to
4796 eliminate a biv. */
4798 static void
4799 loop_givs_dead_check (loop, bl)
4800 struct loop *loop ATTRIBUTE_UNUSED;
4801 struct iv_class *bl;
4803 struct induction *v;
4805 for (v = bl->giv; v; v = v->next_iv)
4807 if (v->ignore
4808 || (v->same && v->same->ignore))
4809 continue;
4811 if (v->giv_type == DEST_REG
4812 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4814 struct induction *v1;
4816 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4817 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4818 v->maybe_dead = 1;
4824 static void
4825 loop_givs_rescan (loop, bl, reg_map)
4826 struct loop *loop;
4827 struct iv_class *bl;
4828 rtx *reg_map;
4830 struct induction *v;
4832 for (v = bl->giv; v; v = v->next_iv)
4834 if (v->same && v->same->ignore)
4835 v->ignore = 1;
4837 if (v->ignore)
4838 continue;
4840 /* Update expression if this was combined, in case other giv was
4841 replaced. */
4842 if (v->same)
4843 v->new_reg = replace_rtx (v->new_reg,
4844 v->same->dest_reg, v->same->new_reg);
4846 /* See if this register is known to be a pointer to something. If
4847 so, see if we can find the alignment. First see if there is a
4848 destination register that is a pointer. If so, this shares the
4849 alignment too. Next see if we can deduce anything from the
4850 computational information. If not, and this is a DEST_ADDR
4851 giv, at least we know that it's a pointer, though we don't know
4852 the alignment. */
4853 if (GET_CODE (v->new_reg) == REG
4854 && v->giv_type == DEST_REG
4855 && REG_POINTER (v->dest_reg))
4856 mark_reg_pointer (v->new_reg,
4857 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4858 else if (GET_CODE (v->new_reg) == REG
4859 && REG_POINTER (v->src_reg))
4861 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4863 if (align == 0
4864 || GET_CODE (v->add_val) != CONST_INT
4865 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4866 align = 0;
4868 mark_reg_pointer (v->new_reg, align);
4870 else if (GET_CODE (v->new_reg) == REG
4871 && GET_CODE (v->add_val) == REG
4872 && REG_POINTER (v->add_val))
4874 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4876 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4877 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4878 align = 0;
4880 mark_reg_pointer (v->new_reg, align);
4882 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4883 mark_reg_pointer (v->new_reg, 0);
4885 if (v->giv_type == DEST_ADDR)
4886 /* Store reduced reg as the address in the memref where we found
4887 this giv. */
4888 validate_change (v->insn, v->location, v->new_reg, 0);
4889 else if (v->replaceable)
4891 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4893 else
4895 rtx original_insn = v->insn;
4896 rtx note;
4898 /* Not replaceable; emit an insn to set the original giv reg from
4899 the reduced giv, same as above. */
4900 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4901 gen_move_insn (v->dest_reg,
4902 v->new_reg));
4904 /* The original insn may have a REG_EQUAL note. This note is
4905 now incorrect and may result in invalid substitutions later.
4906 The original insn is dead, but may be part of a libcall
4907 sequence, which doesn't seem worth the bother of handling. */
4908 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4909 if (note)
4910 remove_note (original_insn, note);
4913 /* When a loop is reversed, givs which depend on the reversed
4914 biv, and which are live outside the loop, must be set to their
4915 correct final value. This insn is only needed if the giv is
4916 not replaceable. The correct final value is the same as the
4917 value that the giv starts the reversed loop with. */
4918 if (bl->reversed && ! v->replaceable)
4919 loop_iv_add_mult_sink (loop,
4920 extend_value_for_giv (v, bl->initial_value),
4921 v->mult_val, v->add_val, v->dest_reg);
4922 else if (v->final_value)
4923 loop_insn_sink_or_swim (loop,
4924 gen_load_of_final_value (v->dest_reg,
4925 v->final_value));
4927 if (loop_dump_stream)
4929 fprintf (loop_dump_stream, "giv at %d reduced to ",
4930 INSN_UID (v->insn));
4931 print_simple_rtl (loop_dump_stream, v->new_reg);
4932 fprintf (loop_dump_stream, "\n");
4938 static int
4939 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4940 struct loop *loop ATTRIBUTE_UNUSED;
4941 struct iv_class *bl;
4942 struct induction *v;
4943 rtx test_reg;
4945 int add_cost;
4946 int benefit;
4948 benefit = v->benefit;
4949 PUT_MODE (test_reg, v->mode);
4950 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4951 test_reg, test_reg);
4953 /* Reduce benefit if not replaceable, since we will insert a
4954 move-insn to replace the insn that calculates this giv. Don't do
4955 this unless the giv is a user variable, since it will often be
4956 marked non-replaceable because of the duplication of the exit
4957 code outside the loop. In such a case, the copies we insert are
4958 dead and will be deleted. So they don't have a cost. Similar
4959 situations exist. */
4960 /* ??? The new final_[bg]iv_value code does a much better job of
4961 finding replaceable giv's, and hence this code may no longer be
4962 necessary. */
4963 if (! v->replaceable && ! bl->eliminable
4964 && REG_USERVAR_P (v->dest_reg))
4965 benefit -= copy_cost;
4967 /* Decrease the benefit to count the add-insns that we will insert
4968 to increment the reduced reg for the giv. ??? This can
4969 overestimate the run-time cost of the additional insns, e.g. if
4970 there are multiple basic blocks that increment the biv, but only
4971 one of these blocks is executed during each iteration. There is
4972 no good way to detect cases like this with the current structure
4973 of the loop optimizer. This code is more accurate for
4974 determining code size than run-time benefits. */
4975 benefit -= add_cost * bl->biv_count;
4977 /* Decide whether to strength-reduce this giv or to leave the code
4978 unchanged (recompute it from the biv each time it is used). This
4979 decision can be made independently for each giv. */
4981 #ifdef AUTO_INC_DEC
4982 /* Attempt to guess whether autoincrement will handle some of the
4983 new add insns; if so, increase BENEFIT (undo the subtraction of
4984 add_cost that was done above). */
4985 if (v->giv_type == DEST_ADDR
4986 /* Increasing the benefit is risky, since this is only a guess.
4987 Avoid increasing register pressure in cases where there would
4988 be no other benefit from reducing this giv. */
4989 && benefit > 0
4990 && GET_CODE (v->mult_val) == CONST_INT)
4992 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4994 if (HAVE_POST_INCREMENT
4995 && INTVAL (v->mult_val) == size)
4996 benefit += add_cost * bl->biv_count;
4997 else if (HAVE_PRE_INCREMENT
4998 && INTVAL (v->mult_val) == size)
4999 benefit += add_cost * bl->biv_count;
5000 else if (HAVE_POST_DECREMENT
5001 && -INTVAL (v->mult_val) == size)
5002 benefit += add_cost * bl->biv_count;
5003 else if (HAVE_PRE_DECREMENT
5004 && -INTVAL (v->mult_val) == size)
5005 benefit += add_cost * bl->biv_count;
5007 #endif
5009 return benefit;
5013 /* Free IV structures for LOOP. */
5015 static void
5016 loop_ivs_free (loop)
5017 struct loop *loop;
5019 struct loop_ivs *ivs = LOOP_IVS (loop);
5020 struct iv_class *iv = ivs->list;
5022 free (ivs->regs);
5024 while (iv)
5026 struct iv_class *next = iv->next;
5027 struct induction *induction;
5028 struct induction *next_induction;
5030 for (induction = iv->biv; induction; induction = next_induction)
5032 next_induction = induction->next_iv;
5033 free (induction);
5035 for (induction = iv->giv; induction; induction = next_induction)
5037 next_induction = induction->next_iv;
5038 free (induction);
5041 free (iv);
5042 iv = next;
5047 /* Perform strength reduction and induction variable elimination.
5049 Pseudo registers created during this function will be beyond the
5050 last valid index in several tables including
5051 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5052 problem here, because the added registers cannot be givs outside of
5053 their loop, and hence will never be reconsidered. But scan_loop
5054 must check regnos to make sure they are in bounds. */
5056 static void
5057 strength_reduce (loop, flags)
5058 struct loop *loop;
5059 int flags;
5061 struct loop_info *loop_info = LOOP_INFO (loop);
5062 struct loop_regs *regs = LOOP_REGS (loop);
5063 struct loop_ivs *ivs = LOOP_IVS (loop);
5064 rtx p;
5065 /* Temporary list pointer for traversing ivs->list. */
5066 struct iv_class *bl;
5067 /* Ratio of extra register life span we can justify
5068 for saving an instruction. More if loop doesn't call subroutines
5069 since in that case saving an insn makes more difference
5070 and more registers are available. */
5071 /* ??? could set this to last value of threshold in move_movables */
5072 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
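/* E.g. on a hypothetical target with 29 non-fixed registers,
   THRESHOLD is 2 * (3 + 29) == 64 for a call-free loop and 32
   otherwise; it feeds the v->lifetime * threshold * benefit
   comparison against insn_count below.  */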
5073 /* Map of pseudo-register replacements. */
5074 rtx *reg_map = NULL;
5075 int reg_map_size;
5076 int unrolled_insn_copies = 0;
5077 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5078 int insn_count = count_insns_in_loop (loop);
5080 addr_placeholder = gen_reg_rtx (Pmode);
5082 ivs->n_regs = max_reg_before_loop;
5083 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5085 /* Find all BIVs in loop. */
5086 loop_bivs_find (loop);
5088 /* Exit if there are no bivs. */
5089 if (! ivs->list)
5091 /* We can still unroll the loop anyway, but indicate that there is no
5092 strength reduction info available. */
5093 if (flags & LOOP_UNROLL)
5094 unroll_loop (loop, insn_count, 0);
5096 loop_ivs_free (loop);
5097 return;
5100 /* Determine how BIVs are initialized by looking through the pre-header's
5101 extended basic block. */
5102 loop_bivs_init_find (loop);
5104 /* Look at each biv and see if we can say anything better about its
5105 initial value from any initializing insns set up above. */
5106 loop_bivs_check (loop);
5108 /* Search the loop for general induction variables. */
5109 loop_givs_find (loop);
5111 /* Try to calculate and save the number of loop iterations. This is
5112 set to zero if the actual number cannot be calculated. This must
5113 be called after all giv's have been identified, since otherwise it may
5114 fail if the iteration variable is a giv. */
5115 loop_iterations (loop);
5117 #ifdef HAVE_prefetch
5118 if (flags & LOOP_PREFETCH)
5119 emit_prefetch_instructions (loop);
5120 #endif
5122 /* Now for each giv for which we still don't know whether or not it is
5123 replaceable, check to see if it is replaceable because its final value
5124 can be calculated. This must be done after loop_iterations is called,
5125 so that final_giv_value will work correctly. */
5126 loop_givs_check (loop);
5128 /* Try to prove that the loop counter variable (if any) is always
5129 nonnegative; if so, record that fact with a REG_NONNEG note
5130 so that "decrement and branch until zero" insn can be used. */
5131 check_dbra_loop (loop, insn_count);
5133 /* Create reg_map to hold substitutions for replaceable giv regs.
5134 Some givs might have been made from biv increments, so look at
5135 ivs->reg_iv_type for a suitable size. */
5136 reg_map_size = ivs->n_regs;
5137 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5139 /* Examine each iv class for feasibility of strength reduction/induction
5140 variable elimination. */
5142 for (bl = ivs->list; bl; bl = bl->next)
5144 struct induction *v;
5145 int benefit;
5147 /* Test whether it will be possible to eliminate this biv
5148 provided all givs are reduced. */
5149 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5151 /* This will be true at the end, if all givs which depend on this
5152 biv have been strength reduced.
5153 We can't (currently) eliminate the biv unless this is so. */
5154 bl->all_reduced = 1;
5156 /* Check each extension dependent giv in this class to see if its
5157 root biv is safe from wrapping in the interior mode. */
5158 check_ext_dependent_givs (bl, loop_info);
5160 /* Combine all giv's for this iv_class. */
5161 combine_givs (regs, bl);
5163 for (v = bl->giv; v; v = v->next_iv)
5165 struct induction *tv;
5167 if (v->ignore || v->same)
5168 continue;
5170 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5172 /* If an insn is not to be strength reduced, then set its ignore
5173 flag, and clear bl->all_reduced. */
5175 /* A giv that depends on a reversed biv must be reduced if it is
5176 used after the loop exit; otherwise, it would have the wrong
5177 value after the loop exit. To keep things simple, just reduce all
5178 such givs, whether or not we know they are used after the loop
5179 exit. */
5181 if (! flag_reduce_all_givs
5182 && v->lifetime * threshold * benefit < insn_count
5183 && ! bl->reversed)
5185 if (loop_dump_stream)
5186 fprintf (loop_dump_stream,
5187 "giv of insn %d not worth while, %d vs %d.\n",
5188 INSN_UID (v->insn),
5189 v->lifetime * threshold * benefit, insn_count);
5190 v->ignore = 1;
5191 bl->all_reduced = 0;
5193 else
5195 /* Check that we can increment the reduced giv without a
5196 multiply insn. If not, reject it. */
5198 for (tv = bl->biv; tv; tv = tv->next_iv)
5199 if (tv->mult_val == const1_rtx
5200 && ! product_cheap_p (tv->add_val, v->mult_val))
5202 if (loop_dump_stream)
5203 fprintf (loop_dump_stream,
5204 "giv of insn %d: would need a multiply.\n",
5205 INSN_UID (v->insn));
5206 v->ignore = 1;
5207 bl->all_reduced = 0;
5208 break;
5213 /* Check for givs whose first use is their definition and whose
5214 last use is the definition of another giv. If so, it is likely
5215 dead and should not be used to derive another giv nor to
5216 eliminate a biv. */
5217 loop_givs_dead_check (loop, bl);
5219 /* Reduce each giv that we decided to reduce. */
5220 loop_givs_reduce (loop, bl);
5222 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5223 as not reduced.
5225 For each giv register that can be reduced now: if replaceable,
5226 substitute reduced reg wherever the old giv occurs;
5227 else add new move insn "giv_reg = reduced_reg". */
5228 loop_givs_rescan (loop, bl, reg_map);
5230 /* All the givs based on the biv bl have been reduced if they
5231 merit it. */
5233 /* For each giv not marked as maybe dead that has been combined with a
5234 second giv, clear any "maybe dead" mark on that second giv.
5235 v->new_reg will either be or refer to the register of the giv it
5236 combined with.
5238 Doing this clearing avoids problems in biv elimination where
5239 a giv's new_reg is a complex value that can't be put in the
5240 insn but the giv combined with (with a reg as new_reg) is
5241 marked maybe_dead. Since the register will be used in either
5242 case, we'd prefer it be used from the simpler giv. */
5244 for (v = bl->giv; v; v = v->next_iv)
5245 if (! v->maybe_dead && v->same)
5246 v->same->maybe_dead = 0;
5248 /* Try to eliminate the biv, if it is a candidate.
5249 This won't work if ! bl->all_reduced,
5250 since the givs we planned to use might not have been reduced.
5252 We have to be careful that we didn't initially think we could
5253 eliminate this biv because of a giv that we now think may be
5254 dead and shouldn't be used as a biv replacement.
5256 Also, there is the possibility that we may have a giv that looks
5257 like it can be used to eliminate a biv, but the resulting insn
5258 isn't valid. This can happen, for example, on the 88k, where a
5259 JUMP_INSN can compare a register only with zero. Attempts to
5260 replace it with a compare with a constant will fail.
5262 Note that in cases where this call fails, we may have replaced some
5263 of the occurrences of the biv with a giv, but no harm was done in
5264 doing so in the rare cases where it can occur. */
5266 if (bl->all_reduced == 1 && bl->eliminable
5267 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5269 /* ??? If we created a new test to bypass the loop entirely,
5270 or otherwise drop straight in, based on this test, then
5271 we might want to rewrite it also. This way some later
5272 pass has more hope of removing the initialization of this
5273 biv entirely. */
5275 /* If final_value != 0, then the biv may be used after loop end
5276 and we must emit an insn to set it just in case.
5278 Reversed bivs already have an insn after the loop setting their
5279 value, so we don't need another one. We can't calculate the
5280 proper final value for such a biv here anyway. */
5281 if (bl->final_value && ! bl->reversed)
5282 loop_insn_sink_or_swim (loop,
5283 gen_load_of_final_value (bl->biv->dest_reg,
5284 bl->final_value));
5286 if (loop_dump_stream)
5287 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5288 bl->regno);
5290 /* See above note wrt final_value. But since we couldn't eliminate
5291 the biv, we must set the value after the loop instead of before. */
5292 else if (bl->final_value && ! bl->reversed)
5293 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5294 bl->final_value));
5297 /* Go through all the instructions in the loop, making all the
5298 register substitutions scheduled in REG_MAP. */
5300 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5301 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5302 || GET_CODE (p) == CALL_INSN)
5304 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5305 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5306 INSN_CODE (p) = -1;
5309 if (loop_info->n_iterations > 0)
5311 /* When we completely unroll a loop we will likely not need the increment
5312 of the loop BIV and we will not need the conditional branch at the
5313 end of the loop. */
5314 unrolled_insn_copies = insn_count - 2;
5316 #ifdef HAVE_cc0
5317 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5318 need the comparison before the conditional branch at the end of the
5319 loop. */
5320 unrolled_insn_copies -= 1;
5321 #endif
5323 /* We'll need one copy for each loop iteration. */
5324 unrolled_insn_copies *= loop_info->n_iterations;
5326 /* A little slop to account for the ability to remove initialization
5327 code, better CSE, and other secondary benefits of completely
5328 unrolling some loops. */
5329 unrolled_insn_copies -= 1;
5331 /* Clamp the value. */
5332 if (unrolled_insn_copies < 0)
5333 unrolled_insn_copies = 0;
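/* As a worked example: a 10-insn body known to iterate 4 times
   yields (10 - 2) * 4 - 1 == 31 estimated copies (one less on
   HAVE_cc0 machines), which exceeds insn_count, so it is not
   auto-unrolled; a 4-insn body iterating twice yields
   (4 - 2) * 2 - 1 == 3 <= 4 and is.  */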
5336 /* Unroll loops from within strength reduction so that we can use the
5337 induction variable information that strength_reduce has already
5338 collected. Always unroll loops that would be as small or smaller
5339 unrolled than when rolled. */
5340 if ((flags & LOOP_UNROLL)
5341 || ((flags & LOOP_AUTO_UNROLL)
5342 && loop_info->n_iterations > 0
5343 && unrolled_insn_copies <= insn_count))
5344 unroll_loop (loop, insn_count, 1);
5346 #ifdef HAVE_doloop_end
5347 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5348 doloop_optimize (loop);
5349 #endif /* HAVE_doloop_end */
5351 /* If the number of iterations is known, record a branch prediction note
5352 on the branch. Do that only in the second loop pass, as loop unrolling
5353 may change the number of iterations performed. */
5354 if (flags & LOOP_BCT)
5356 unsigned HOST_WIDE_INT n
5357 = loop_info->n_iterations / loop_info->unroll_number;
5358 if (n > 1)
5359 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
5360 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5363 if (loop_dump_stream)
5364 fprintf (loop_dump_stream, "\n");
5366 loop_ivs_free (loop);
5367 if (reg_map)
5368 free (reg_map);
5371 /* Record all basic induction variables calculated in the insn. */
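/* A typical hit is an insn such as
   (set (reg 100) (plus (reg 100) (const_int 4))), for which
   basic_induction_var reports inc_val == (const_int 4) and
   mult_val == const1_rtx, so reg 100 is recorded as a biv.  The
   register number is illustrative only.  */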
5372 static rtx
5373 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5374 struct loop *loop;
5375 rtx p;
5376 int not_every_iteration;
5377 int maybe_multiple;
5379 struct loop_ivs *ivs = LOOP_IVS (loop);
5380 rtx set;
5381 rtx dest_reg;
5382 rtx inc_val;
5383 rtx mult_val;
5384 rtx *location;
5386 if (GET_CODE (p) == INSN
5387 && (set = single_set (p))
5388 && GET_CODE (SET_DEST (set)) == REG)
5390 dest_reg = SET_DEST (set);
5391 if (REGNO (dest_reg) < max_reg_before_loop
5392 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5393 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5395 if (basic_induction_var (loop, SET_SRC (set),
5396 GET_MODE (SET_SRC (set)),
5397 dest_reg, p, &inc_val, &mult_val,
5398 &location))
5400 /* It is a possible basic induction variable.
5401 Create and initialize an induction structure for it. */
5403 struct induction *v
5404 = (struct induction *) xmalloc (sizeof (struct induction));
5406 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5407 not_every_iteration, maybe_multiple);
5408 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5410 else if (REGNO (dest_reg) < ivs->n_regs)
5411 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5414 return p;
5417 /* Record all givs calculated in the insn.
5418 A register is a giv if: it is only set once, it is a function of a
5419 biv and a constant (or invariant), and it is not a biv. */
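/* For example, with a biv in reg 100 and a loop invariant in reg 102,
   an insn such as
   (set (reg 101) (plus (mult (reg 100) (const_int 4)) (reg 102)))
   makes reg 101 a DEST_REG giv with src_reg 100, mult_val 4 and
   add_val (reg 102).  Register numbers are illustrative only.  */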
5420 static rtx
5421 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5422 struct loop *loop;
5423 rtx p;
5424 int not_every_iteration;
5425 int maybe_multiple;
5427 struct loop_regs *regs = LOOP_REGS (loop);
5429 rtx set;
5430 /* Look for a general induction variable in a register. */
5431 if (GET_CODE (p) == INSN
5432 && (set = single_set (p))
5433 && GET_CODE (SET_DEST (set)) == REG
5434 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5436 rtx src_reg;
5437 rtx dest_reg;
5438 rtx add_val;
5439 rtx mult_val;
5440 rtx ext_val;
5441 int benefit;
5442 rtx regnote = 0;
5443 rtx last_consec_insn;
5445 dest_reg = SET_DEST (set);
5446 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5447 return p;
5449 if (/* SET_SRC is a giv. */
5450 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5451 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5452 /* Equivalent expression is a giv. */
5453 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5454 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5455 &add_val, &mult_val, &ext_val, 0,
5456 &benefit, VOIDmode)))
5457 /* Don't try to handle any regs made by loop optimization.
5458 We have nothing on them in regno_first_uid, etc. */
5459 && REGNO (dest_reg) < max_reg_before_loop
5460 /* Don't recognize a BASIC_INDUCT_VAR here. */
5461 && dest_reg != src_reg
5462 /* This must be the only place where the register is set. */
5463 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5464 /* or all sets must be consecutive and make a giv. */
5465 || (benefit = consec_sets_giv (loop, benefit, p,
5466 src_reg, dest_reg,
5467 &add_val, &mult_val, &ext_val,
5468 &last_consec_insn))))
5470 struct induction *v
5471 = (struct induction *) xmalloc (sizeof (struct induction));
5473 /* If this is a library call, increase benefit. */
5474 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5475 benefit += libcall_benefit (p);
5477 /* Skip the consecutive insns, if there are any. */
5478 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5479 p = last_consec_insn;
5481 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5482 ext_val, benefit, DEST_REG, not_every_iteration,
5483 maybe_multiple, (rtx*) 0);
5488 #ifndef DONT_REDUCE_ADDR
5489 /* Look for givs which are memory addresses. */
5490 /* This resulted in worse code on a VAX 8600. I wonder if it
5491 still does. */
5492 if (GET_CODE (p) == INSN)
5493 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5494 maybe_multiple);
5495 #endif
5497 /* Update the status of whether giv can derive other givs. This can
5498 change when we pass a label or an insn that updates a biv. */
5499 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5500 || GET_CODE (p) == CODE_LABEL)
5501 update_giv_derive (loop, p);
5502 return p;
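/* Example of a giv recognized above (hypothetical registers): with biv
   I, an insn computing J = I * 4 + A for a loop-invariant A makes J a
   giv, provided J is a pseudo set only by that insn (or by a consecutive
   group forming one giv) and J is not itself a biv.  */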
5505 /* Return 1 if X is a valid source for an initial value (or as a value being
5506 compared against in an initial test).
5508 X must be either a register or constant and must not be clobbered between
5509 the current insn and the start of the loop.
5511 INSN is the insn containing X. */
5513 static int
5514 valid_initial_value_p (x, insn, call_seen, loop_start)
5515 rtx x;
5516 rtx insn;
5517 int call_seen;
5518 rtx loop_start;
5520 if (CONSTANT_P (x))
5521 return 1;
5523 /* Only consider pseudos we know about initialized in insns whose luids
5524 we know. */
5525 if (GET_CODE (x) != REG
5526 || REGNO (x) >= max_reg_before_loop)
5527 return 0;
5529 /* Don't use a call-clobbered register across a call which clobbers it. On
5530 some machines, don't use any hard registers at all. */
5531 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5532 && (SMALL_REGISTER_CLASSES
5533 || (call_used_regs[REGNO (x)] && call_seen)))
5534 return 0;
5536 /* Don't use registers that have been clobbered before the start of the
5537 loop. */
5538 if (reg_set_between_p (x, insn, loop_start))
5539 return 0;
5541 return 1;
5544 /* Scan X for memory refs and check each memory address
5545 as a possible giv. INSN is the insn whose pattern X comes from.
5546 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5547 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5548 more than once in each loop iteration. */
5550 static void
5551 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5552 const struct loop *loop;
5553 rtx x;
5554 rtx insn;
5555 int not_every_iteration, maybe_multiple;
5557 int i, j;
5558 enum rtx_code code;
5559 const char *fmt;
5561 if (x == 0)
5562 return;
5564 code = GET_CODE (x);
5565 switch (code)
5567 case REG:
5568 case CONST_INT:
5569 case CONST:
5570 case CONST_DOUBLE:
5571 case SYMBOL_REF:
5572 case LABEL_REF:
5573 case PC:
5574 case CC0:
5575 case ADDR_VEC:
5576 case ADDR_DIFF_VEC:
5577 case USE:
5578 case CLOBBER:
5579 return;
5581 case MEM:
5583 rtx src_reg;
5584 rtx add_val;
5585 rtx mult_val;
5586 rtx ext_val;
5587 int benefit;
5589 /* This code used to disable creating GIVs with mult_val == 1 and
5590 add_val == 0. However, this leads to lost optimizations when
5591 it comes time to combine a set of related DEST_ADDR GIVs, since
5592 this one would not be seen. */
5594 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5595 &mult_val, &ext_val, 1, &benefit,
5596 GET_MODE (x)))
5598 /* Found one; record it. */
5599 struct induction *v
5600 = (struct induction *) xmalloc (sizeof (struct induction));
5602 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5603 add_val, ext_val, benefit, DEST_ADDR,
5604 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5606 v->mem = x;
5609 return;
5611 default:
5612 break;
5615 /* Recursively scan the subexpressions for other mem refs. */
5617 fmt = GET_RTX_FORMAT (code);
5618 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5619 if (fmt[i] == 'e')
5620 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5621 maybe_multiple);
5622 else if (fmt[i] == 'E')
5623 for (j = 0; j < XVECLEN (x, i); j++)
5624 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5625 maybe_multiple);
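/* Illustration (assumed RTL, not taken from a real dump): given biv
   reg 101, a reference such as

     (mem:SI (plus:SI (reg 101) (const_int 16)))

   inside the loop yields a DEST_ADDR giv for the address, recorded with
   mult_val == 1 and add_val == 16.  */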
5628 /* Fill in the data about one biv update.
5629 V is the `struct induction' in which we record the biv. (It is
5630 allocated by the caller, with xmalloc.)
5631 INSN is the insn that sets it.
5632 DEST_REG is the biv's reg.
5634 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5635 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5636 being set to INC_VAL.
5638 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5639 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5640 can be executed more than once per iteration. If MAYBE_MULTIPLE
5641 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5642 executed exactly once per iteration. */
5644 static void
5645 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5646 not_every_iteration, maybe_multiple)
5647 struct loop *loop;
5648 struct induction *v;
5649 rtx insn;
5650 rtx dest_reg;
5651 rtx inc_val;
5652 rtx mult_val;
5653 rtx *location;
5654 int not_every_iteration;
5655 int maybe_multiple;
5657 struct loop_ivs *ivs = LOOP_IVS (loop);
5658 struct iv_class *bl;
5660 v->insn = insn;
5661 v->src_reg = dest_reg;
5662 v->dest_reg = dest_reg;
5663 v->mult_val = mult_val;
5664 v->add_val = inc_val;
5665 v->ext_dependent = NULL_RTX;
5666 v->location = location;
5667 v->mode = GET_MODE (dest_reg);
5668 v->always_computable = ! not_every_iteration;
5669 v->always_executed = ! not_every_iteration;
5670 v->maybe_multiple = maybe_multiple;
5672 /* Add this to the reg's iv_class, creating a class
5673 if this is the first incrementation of the reg. */
5675 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5676 if (bl == 0)
5678 /* Create and initialize new iv_class. */
5680 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5682 bl->regno = REGNO (dest_reg);
5683 bl->biv = 0;
5684 bl->giv = 0;
5685 bl->biv_count = 0;
5686 bl->giv_count = 0;
5688 /* Set initial value to the reg itself. */
5689 bl->initial_value = dest_reg;
5690 bl->final_value = 0;
5691 /* We haven't seen the initializing insn yet */
5692 bl->init_insn = 0;
5693 bl->init_set = 0;
5694 bl->initial_test = 0;
5695 bl->incremented = 0;
5696 bl->eliminable = 0;
5697 bl->nonneg = 0;
5698 bl->reversed = 0;
5699 bl->total_benefit = 0;
5701 /* Add this class to ivs->list. */
5702 bl->next = ivs->list;
5703 ivs->list = bl;
5705 /* Put it in the array of biv register classes. */
5706 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5709 /* Update IV_CLASS entry for this biv. */
5710 v->next_iv = bl->biv;
5711 bl->biv = v;
5712 bl->biv_count++;
5713 if (mult_val == const1_rtx)
5714 bl->incremented = 1;
5716 if (loop_dump_stream)
5717 loop_biv_dump (v, loop_dump_stream, 0);
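/* Encoding example (hypothetical values): the update I = I + 4 is
   recorded with MULT_VAL == const1_rtx and INC_VAL == 4, while the
   invariant assignment I = N is recorded with MULT_VAL == const0_rtx
   and INC_VAL == N, matching the conventions described above.  */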
5720 /* Fill in the data about one giv.
5721 V is the `struct induction' in which we record the giv. (It is
5722 allocated by the caller, with xmalloc.)
5723 INSN is the insn that sets it.
5724 BENEFIT estimates the savings from deleting this insn.
5725 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5726 into a register or is used as a memory address.
5728 SRC_REG is the biv reg which the giv is computed from.
5729 DEST_REG is the giv's reg (if the giv is stored in a reg).
5730 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5731 LOCATION points to the place where this giv's value appears in INSN. */
5733 static void
5734 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5735 benefit, type, not_every_iteration, maybe_multiple, location)
5736 const struct loop *loop;
5737 struct induction *v;
5738 rtx insn;
5739 rtx src_reg;
5740 rtx dest_reg;
5741 rtx mult_val, add_val, ext_val;
5742 int benefit;
5743 enum g_types type;
5744 int not_every_iteration, maybe_multiple;
5745 rtx *location;
5747 struct loop_ivs *ivs = LOOP_IVS (loop);
5748 struct induction *b;
5749 struct iv_class *bl;
5750 rtx set = single_set (insn);
5751 rtx temp;
5753 /* Attempt to prove constancy of the values. Don't let simplify_rtx
5754 undo the MULT canonicalization that we performed earlier. */
5755 temp = simplify_rtx (add_val);
5756 if (temp
5757 && ! (GET_CODE (add_val) == MULT
5758 && GET_CODE (temp) == ASHIFT))
5759 add_val = temp;
5761 v->insn = insn;
5762 v->src_reg = src_reg;
5763 v->giv_type = type;
5764 v->dest_reg = dest_reg;
5765 v->mult_val = mult_val;
5766 v->add_val = add_val;
5767 v->ext_dependent = ext_val;
5768 v->benefit = benefit;
5769 v->location = location;
5770 v->cant_derive = 0;
5771 v->combined_with = 0;
5772 v->maybe_multiple = maybe_multiple;
5773 v->maybe_dead = 0;
5774 v->derive_adjustment = 0;
5775 v->same = 0;
5776 v->ignore = 0;
5777 v->new_reg = 0;
5778 v->final_value = 0;
5779 v->same_insn = 0;
5780 v->auto_inc_opt = 0;
5781 v->unrolled = 0;
5782 v->shared = 0;
5784 /* The v->always_computable field is used in update_giv_derive, to
5785 determine whether a giv can be used to derive another giv. For a
5786 DEST_REG giv, INSN computes a new value for the giv, so its value
5787 isn't computable if INSN isn't executed every iteration.
5788 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5789 it does not compute a new value. Hence the value is always computable
5790 regardless of whether INSN is executed each iteration. */
5792 if (type == DEST_ADDR)
5793 v->always_computable = 1;
5794 else
5795 v->always_computable = ! not_every_iteration;
5797 v->always_executed = ! not_every_iteration;
5799 if (type == DEST_ADDR)
5801 v->mode = GET_MODE (*location);
5802 v->lifetime = 1;
5804 else /* type == DEST_REG */
5806 v->mode = GET_MODE (SET_DEST (set));
5808 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5810 /* If the lifetime is zero, it means that this register is
5811 really a dead store. So mark this as a giv that can be
5812 ignored. This will not prevent the biv from being eliminated. */
5813 if (v->lifetime == 0)
5814 v->ignore = 1;
5816 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5817 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5820 /* Add the giv to the class of givs computed from one biv. */
5822 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5823 if (bl)
5825 v->next_iv = bl->giv;
5826 bl->giv = v;
5827 /* Don't count DEST_ADDR. This is supposed to count the number of
5828 insns that calculate givs. */
5829 if (type == DEST_REG)
5830 bl->giv_count++;
5831 bl->total_benefit += benefit;
5833 else
5834 /* Fatal error, biv missing for this giv? */
5835 abort ();
5837 if (type == DEST_ADDR)
5839 v->replaceable = 1;
5840 v->not_replaceable = 0;
5842 else
5844 /* The giv can be replaced outright by the reduced register only if all
5845 of the following conditions are true:
5846 - the insn that sets the giv is always executed on any iteration
5847 on which the giv is used at all
5848 (there are two ways to deduce this:
5849 either the insn is executed on every iteration,
5850 or all uses follow that insn in the same basic block),
5851 - the giv is not used outside the loop
5852 - no assignments to the biv occur during the giv's lifetime. */
5854 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5855 /* Previous line always fails if INSN was moved by loop opt. */
5856 && REGNO_LAST_LUID (REGNO (dest_reg))
5857 < INSN_LUID (loop->end)
5858 && (! not_every_iteration
5859 || last_use_this_basic_block (dest_reg, insn)))
5861 /* Now check that there are no assignments to the biv within the
5862 giv's lifetime. This requires two separate checks. */
5864 /* Check each biv update, and fail if any are between the first
5865 and last use of the giv.
5867 If this loop contains an inner loop that was unrolled, then
5868 the insn modifying the biv may have been emitted by the loop
5869 unrolling code, and hence does not have a valid luid. Just
5870 mark the biv as not replaceable in this case. It is not very
5871 useful as a biv, because it is used in two different loops.
5872 It is very unlikely that we would be able to optimize the giv
5873 using this biv anyway. */
5875 v->replaceable = 1;
5876 v->not_replaceable = 0;
5877 for (b = bl->biv; b; b = b->next_iv)
5879 if (INSN_UID (b->insn) >= max_uid_for_loop
5880 || ((INSN_LUID (b->insn)
5881 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5882 && (INSN_LUID (b->insn)
5883 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5885 v->replaceable = 0;
5886 v->not_replaceable = 1;
5887 break;
5891 /* If there are any backwards branches that go from after the
5892 biv update to before it, then this giv is not replaceable. */
5893 if (v->replaceable)
5894 for (b = bl->biv; b; b = b->next_iv)
5895 if (back_branch_in_range_p (loop, b->insn))
5897 v->replaceable = 0;
5898 v->not_replaceable = 1;
5899 break;
5902 else
5904 /* May still be replaceable; we don't have enough info here to
5905 decide. */
5906 v->replaceable = 0;
5907 v->not_replaceable = 0;
5911 /* Record whether the add_val contains a const_int, for later use by
5912 combine_givs. */
5914 rtx tem = add_val;
5916 v->no_const_addval = 1;
5917 if (tem == const0_rtx)
5919 else if (CONSTANT_P (add_val))
5920 v->no_const_addval = 0;
5921 if (GET_CODE (tem) == PLUS)
5923 while (1)
5925 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5926 tem = XEXP (tem, 0);
5927 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5928 tem = XEXP (tem, 1);
5929 else
5930 break;
5932 if (CONSTANT_P (XEXP (tem, 1)))
5933 v->no_const_addval = 0;
5937 if (loop_dump_stream)
5938 loop_giv_dump (v, loop_dump_stream, 0);
5941 /* All this does is determine whether a giv can be made replaceable because
5942 its final value can be calculated. This code cannot be part of record_giv
5943 above, because final_giv_value requires that the number of loop iterations
5944 be known, and that cannot be accurately calculated until after all givs
5945 have been identified. */
5947 static void
5948 check_final_value (loop, v)
5949 const struct loop *loop;
5950 struct induction *v;
5952 struct loop_ivs *ivs = LOOP_IVS (loop);
5953 struct iv_class *bl;
5954 rtx final_value = 0;
5956 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5958 /* DEST_ADDR givs will never reach here, because they are always marked
5959 replaceable above in record_giv. */
5961 /* The giv can be replaced outright by the reduced register only if all
5962 of the following conditions are true:
5963 - the insn that sets the giv is always executed on any iteration
5964 on which the giv is used at all
5965 (there are two ways to deduce this:
5966 either the insn is executed on every iteration,
5967 or all uses follow that insn in the same basic block),
5968 - its final value can be calculated (this condition differs from
5969 the one above in record_giv)
5970 - it is not used before it is set
5971 - no assignments to the biv occur during the giv's lifetime. */
5973 #if 0
5974 /* This is only called now when replaceable is known to be false. */
5975 /* Clear replaceable, so that it won't confuse final_giv_value. */
5976 v->replaceable = 0;
5977 #endif
5979 if ((final_value = final_giv_value (loop, v))
5980 && (v->always_executed
5981 || last_use_this_basic_block (v->dest_reg, v->insn)))
5983 int biv_increment_seen = 0, before_giv_insn = 0;
5984 rtx p = v->insn;
5985 rtx last_giv_use;
5987 v->replaceable = 1;
5988 v->not_replaceable = 0;
5990 /* When trying to determine whether or not a biv increment occurs
5991 during the lifetime of the giv, we can ignore uses of the variable
5992 outside the loop because final_value is true. Hence we cannot
5993 use regno_last_uid and regno_first_uid as above in record_giv. */
5995 /* Search the loop to determine whether any assignments to the
5996 biv occur during the giv's lifetime. Start with the insn
5997 that sets the giv, and search around the loop until we come
5998 back to that insn again.
6000 Also fail if there is a jump within the giv's lifetime that jumps
6001 to somewhere outside the lifetime but still within the loop. This
6002 catches spaghetti code where the execution order is not linear, and
6003 hence the above test fails. Here we assume that the giv lifetime
6004 does not extend from one iteration of the loop to the next, so as
6005 to make the test easier. Since the lifetime isn't known yet,
6006 this requires two loops. See also record_giv above. */
6008 last_giv_use = v->insn;
6010 while (1)
6012 p = NEXT_INSN (p);
6013 if (p == loop->end)
6015 before_giv_insn = 1;
6016 p = NEXT_INSN (loop->start);
6018 if (p == v->insn)
6019 break;
6021 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
6022 || GET_CODE (p) == CALL_INSN)
6024 /* It is possible for the BIV increment to use the GIV if we
6025 have a cycle. Thus we must be sure to check each insn for
6026 both BIV and GIV uses, and we must check for BIV uses
6027 first. */
6029 if (! biv_increment_seen
6030 && reg_set_p (v->src_reg, PATTERN (p)))
6031 biv_increment_seen = 1;
6033 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6035 if (biv_increment_seen || before_giv_insn)
6037 v->replaceable = 0;
6038 v->not_replaceable = 1;
6039 break;
6041 last_giv_use = p;
6046 /* Now that the lifetime of the giv is known, check for branches
6047 from within the lifetime to outside the lifetime if it is still
6048 replaceable. */
6050 if (v->replaceable)
6052 p = v->insn;
6053 while (1)
6055 p = NEXT_INSN (p);
6056 if (p == loop->end)
6057 p = NEXT_INSN (loop->start);
6058 if (p == last_giv_use)
6059 break;
6061 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6062 && LABEL_NAME (JUMP_LABEL (p))
6063 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6064 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6065 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6066 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6068 v->replaceable = 0;
6069 v->not_replaceable = 1;
6071 if (loop_dump_stream)
6072 fprintf (loop_dump_stream,
6073 "Found branch outside giv lifetime.\n");
6075 break;
6080 /* If it is replaceable, then save the final value. */
6081 if (v->replaceable)
6082 v->final_value = final_value;
6085 if (loop_dump_stream && v->replaceable)
6086 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6087 INSN_UID (v->insn), REGNO (v->dest_reg));
6090 /* Update the status of whether a giv can derive other givs.
6092 We need to do something special if there is or may be an update to the biv
6093 between the time the giv is defined and the time it is used to derive
6094 another giv.
6096 In addition, a giv that is only conditionally set is not allowed to
6097 derive another giv once a label has been passed.
6099 The cases we look at are when a label or an update to a biv is passed. */
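/* Compensation sketch (illustrative values): if giv G = I * 4 is set,
   the biv update I = I + 1 follows, and another giv is later derived
   from G, then G's derive_adjustment becomes 1 * 4 = 4, since G as
   computed from the old I equals 4 * I - 4 in terms of the updated I.  */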
6101 static void
6102 update_giv_derive (loop, p)
6103 const struct loop *loop;
6104 rtx p;
6106 struct loop_ivs *ivs = LOOP_IVS (loop);
6107 struct iv_class *bl;
6108 struct induction *biv, *giv;
6109 rtx tem;
6110 int dummy;
6112 /* Search all IV classes, then all bivs, and finally all givs.
6114 There are three cases we are concerned with. First we have the situation
6115 of a giv that is only updated conditionally. In that case, it may not
6116 derive any givs after a label is passed.
6118 The second case is when a biv update occurs, or may occur, after the
6119 definition of a giv. For certain biv updates (see below) that are
6120 known to occur between the giv definition and use, we can adjust the
6121 giv definition. For others, or when the biv update is conditional,
6122 we must prevent the giv from deriving any other givs. There are two
6123 sub-cases within this case.
6125 If this is a label, we are concerned with any biv update that is done
6126 conditionally, since it may be done after the giv is defined followed by
6127 a branch here (actually, we need to pass both a jump and a label, but
6128 this extra tracking doesn't seem worth it).
6130 If this is a jump, we are concerned about any biv update that may be
6131 executed multiple times. We are actually only concerned about
6132 backward jumps, but it is probably not worth performing the test
6133 on the jump again here.
6135 If this is a biv update, we must adjust the giv status to show that a
6136 subsequent biv update was performed. If this adjustment cannot be done,
6137 the giv cannot derive further givs. */
6139 for (bl = ivs->list; bl; bl = bl->next)
6140 for (biv = bl->biv; biv; biv = biv->next_iv)
6141 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6142 || biv->insn == p)
6144 for (giv = bl->giv; giv; giv = giv->next_iv)
6146 /* If cant_derive is already true, there is no point in
6147 checking all of these conditions again. */
6148 if (giv->cant_derive)
6149 continue;
6151 /* If this giv is conditionally set and we have passed a label,
6152 it cannot derive anything. */
6153 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6154 giv->cant_derive = 1;
6156 /* Skip givs that have mult_val == 0, since
6157 they are really invariants. Also skip those that are
6158 replaceable, since we know their lifetime doesn't contain
6159 any biv update. */
6160 else if (giv->mult_val == const0_rtx || giv->replaceable)
6161 continue;
6163 /* The only way we can allow this giv to derive another
6164 is if this is a biv increment and we can form the product
6165 of biv->add_val and giv->mult_val. In this case, we will
6166 be able to compute a compensation. */
6167 else if (biv->insn == p)
6169 rtx ext_val_dummy;
6171 tem = 0;
6172 if (biv->mult_val == const1_rtx)
6173 tem = simplify_giv_expr (loop,
6174 gen_rtx_MULT (giv->mode,
6175 biv->add_val,
6176 giv->mult_val),
6177 &ext_val_dummy, &dummy);
6179 if (tem && giv->derive_adjustment)
6180 tem = simplify_giv_expr
6181 (loop,
6182 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6183 &ext_val_dummy, &dummy);
6185 if (tem)
6186 giv->derive_adjustment = tem;
6187 else
6188 giv->cant_derive = 1;
6190 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6191 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6192 giv->cant_derive = 1;
6197 /* Check whether an insn is an increment legitimate for a basic induction var.
6198 X is the source of insn P, or a part of it.
6199 MODE is the mode in which X should be interpreted.
6201 DEST_REG is the putative biv, also the destination of the insn.
6202 We accept patterns of these forms:
6203 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6204 REG = INVARIANT + REG
6206 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6207 store the additive term into *INC_VAL, and store the place where
6208 we found the additive term into *LOCATION.
6210 If X is an assignment of an invariant into DEST_REG, we set
6211 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6213 We also want to detect a BIV when it corresponds to a variable
6214 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6215 of the variable may be a PLUS that adds a SUBREG of that variable to
6216 an invariant and then sign- or zero-extends the result of the PLUS
6217 into the variable.
6219 Most GIVs in such cases will be in the promoted mode, since that is
6220 probably the natural computation mode (and almost certainly the mode
6221 used for addresses) on the machine. So we view the pseudo-reg containing
6222 the variable as the BIV, as if it were simply incremented.
6224 Note that treating the entire pseudo as a BIV will result in making
6225 simple increments to any GIVs based on it. However, if the variable
6226 overflows in its declared mode but not its promoted mode, the result will
6227 be incorrect. This is acceptable if the variable is signed, since
6228 overflows in such cases are undefined, but not if it is unsigned, since
6229 those overflows are defined. So we only check for SIGN_EXTEND and
6230 not ZERO_EXTEND.
6232 If we cannot find a biv, we return 0. */
6234 static int
6235 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6236 const struct loop *loop;
6237 rtx x;
6238 enum machine_mode mode;
6239 rtx dest_reg;
6240 rtx p;
6241 rtx *inc_val;
6242 rtx *mult_val;
6243 rtx **location;
6245 enum rtx_code code;
6246 rtx *argp, arg;
6247 rtx insn, set = 0;
6249 code = GET_CODE (x);
6250 *location = NULL;
6251 switch (code)
6253 case PLUS:
6254 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6255 || (GET_CODE (XEXP (x, 0)) == SUBREG
6256 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6257 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6259 argp = &XEXP (x, 1);
6261 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6262 || (GET_CODE (XEXP (x, 1)) == SUBREG
6263 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6264 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6266 argp = &XEXP (x, 0);
6268 else
6269 return 0;
6271 arg = *argp;
6272 if (loop_invariant_p (loop, arg) != 1)
6273 return 0;
6275 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6276 *mult_val = const1_rtx;
6277 *location = argp;
6278 return 1;
6280 case SUBREG:
6281 /* If what's inside the SUBREG is a BIV, then the SUBREG is one too;
6282 this will handle addition of promoted variables.
6283 ??? The comment at the start of this function is wrong: promoted
6284 variable increments don't look like it says they do. */
6285 return basic_induction_var (loop, SUBREG_REG (x),
6286 GET_MODE (SUBREG_REG (x)),
6287 dest_reg, p, inc_val, mult_val, location);
6289 case REG:
6290 /* If this register is assigned in a previous insn, look at its
6291 source, but don't go outside the loop or past a label. */
6293 /* If this sets a register to itself, we would repeat any previous
6294 biv increment if we applied this strategy blindly. */
6295 if (rtx_equal_p (dest_reg, x))
6296 return 0;
6298 insn = p;
6299 while (1)
6301 rtx dest;
6303 do
6304 insn = PREV_INSN (insn);
6306 while (insn && GET_CODE (insn) == NOTE
6307 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6309 if (!insn)
6310 break;
6311 set = single_set (insn);
6312 if (set == 0)
6313 break;
6314 dest = SET_DEST (set);
6315 if (dest == x
6316 || (GET_CODE (dest) == SUBREG
6317 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6318 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6319 && SUBREG_REG (dest) == x))
6320 return basic_induction_var (loop, SET_SRC (set),
6321 (GET_MODE (SET_SRC (set)) == VOIDmode
6322 ? GET_MODE (x)
6323 : GET_MODE (SET_SRC (set))),
6324 dest_reg, insn,
6325 inc_val, mult_val, location);
6327 while (GET_CODE (dest) == SIGN_EXTRACT
6328 || GET_CODE (dest) == ZERO_EXTRACT
6329 || GET_CODE (dest) == SUBREG
6330 || GET_CODE (dest) == STRICT_LOW_PART)
6331 dest = XEXP (dest, 0);
6332 if (dest == x)
6333 break;
6335 /* Fall through. */
6337 /* Can accept constant setting of biv only when inside the innermost loop.
6338 Otherwise, a biv of an inner loop may be incorrectly recognized
6339 as a biv of the outer loop,
6340 causing code to be moved INTO the inner loop. */
6341 case MEM:
6342 if (loop_invariant_p (loop, x) != 1)
6343 return 0;
6344 case CONST_INT:
6345 case SYMBOL_REF:
6346 case CONST:
6347 /* convert_modes aborts if we try to convert to or from CCmode, so just
6348 exclude that case. It is very unlikely that a condition code value
6349 would be a useful iterator anyway. convert_modes also aborts if we try
6350 to convert a float mode to non-float or vice versa. */
6351 if (loop->level == 1
6352 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6353 && GET_MODE_CLASS (mode) != MODE_CC)
6355 /* Possible bug here? Perhaps we don't know the mode of X. */
6356 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6357 *mult_val = const0_rtx;
6358 return 1;
6360 else
6361 return 0;
6363 case SIGN_EXTEND:
6364 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6365 dest_reg, p, inc_val, mult_val, location);
6367 case ASHIFTRT:
6368 /* Similar, since this can be a sign extension. */
6369 for (insn = PREV_INSN (p);
6370 (insn && GET_CODE (insn) == NOTE
6371 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6372 insn = PREV_INSN (insn))
6373 ;
6375 if (insn)
6376 set = single_set (insn);
6378 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6379 && set && SET_DEST (set) == XEXP (x, 0)
6380 && GET_CODE (XEXP (x, 1)) == CONST_INT
6381 && INTVAL (XEXP (x, 1)) >= 0
6382 && GET_CODE (SET_SRC (set)) == ASHIFT
6383 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6384 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6385 GET_MODE (XEXP (x, 0)),
6386 dest_reg, insn, inc_val, mult_val,
6387 location);
6388 return 0;
6390 default:
6391 return 0;
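/* Example of the accepted forms above (sketched RTL, assuming pseudo
   101 is DEST_REG):

     (set (reg 101) (plus (reg 101) (const_int 4)))
        -> *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4)
     (set (reg 101) (reg 102))     ; reg 102 loop-invariant
        -> *MULT_VAL = const0_rtx, *INC_VAL = (reg 102)  */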
6395 /* A general induction variable (giv) is any quantity that is a linear
6396 function of a basic induction variable,
6397 i.e. giv = biv * mult_val + add_val.
6398 The coefficients can be any loop invariant quantity.
6399 A giv need not be computed directly from the biv;
6400 it can be computed by way of other givs. */
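/* For instance (illustration only): with a biv I stepping by 1, both
   J = I * 4 + 20 and K = J + 4 are givs; K is computed by way of the
   giv J rather than directly from I, yet is still linear in I
   (K = I * 4 + 24).  */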
6402 /* Determine whether X computes a giv.
6403 If it does, return a nonzero value
6404 which is the benefit from eliminating the computation of X;
6405 set *SRC_REG to the register of the biv that it is computed from;
6406 set *ADD_VAL and *MULT_VAL to the coefficients,
6407 such that the value of X is biv * mult + add; */
6409 static int
6410 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6411 is_addr, pbenefit, addr_mode)
6412 const struct loop *loop;
6413 rtx x;
6414 rtx *src_reg;
6415 rtx *add_val;
6416 rtx *mult_val;
6417 rtx *ext_val;
6418 int is_addr;
6419 int *pbenefit;
6420 enum machine_mode addr_mode;
6422 struct loop_ivs *ivs = LOOP_IVS (loop);
6423 rtx orig_x = x;
6425 /* If this is an invariant, forget it, it isn't a giv. */
6426 if (loop_invariant_p (loop, x) == 1)
6427 return 0;
6429 *pbenefit = 0;
6430 *ext_val = NULL_RTX;
6431 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6432 if (x == 0)
6433 return 0;
6435 switch (GET_CODE (x))
6437 case USE:
6438 case CONST_INT:
6439 /* Since this is now an invariant and wasn't before, it must be a giv
6440 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6441 with. */
6442 *src_reg = ivs->list->biv->dest_reg;
6443 *mult_val = const0_rtx;
6444 *add_val = x;
6445 break;
6447 case REG:
6448 /* This is equivalent to a BIV. */
6449 *src_reg = x;
6450 *mult_val = const1_rtx;
6451 *add_val = const0_rtx;
6452 break;
6454 case PLUS:
6455 /* Either (plus (biv) (invar)) or
6456 (plus (mult (biv) (invar_1)) (invar_2)). */
6457 if (GET_CODE (XEXP (x, 0)) == MULT)
6459 *src_reg = XEXP (XEXP (x, 0), 0);
6460 *mult_val = XEXP (XEXP (x, 0), 1);
6462 else
6464 *src_reg = XEXP (x, 0);
6465 *mult_val = const1_rtx;
6467 *add_val = XEXP (x, 1);
6468 break;
6470 case MULT:
6471 /* ADD_VAL is zero. */
6472 *src_reg = XEXP (x, 0);
6473 *mult_val = XEXP (x, 1);
6474 *add_val = const0_rtx;
6475 break;
6477 default:
6478 abort ();
6481 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6482 one unless they are CONST_INT). */
6483 if (GET_CODE (*add_val) == USE)
6484 *add_val = XEXP (*add_val, 0);
6485 if (GET_CODE (*mult_val) == USE)
6486 *mult_val = XEXP (*mult_val, 0);
6488 if (is_addr)
6489 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6490 else
6491 *pbenefit += rtx_cost (orig_x, SET);
6493 /* Always return true if this is a giv so it will be detected as such,
6494 even if the benefit is zero or negative. This allows elimination
6495 of bivs that might otherwise not be eliminated. */
6496 return 1;
6499 /* Given an expression, X, try to form it as a linear function of a biv.
6500 We will canonicalize it to be of the form
6501 (plus (mult (BIV) (invar_1))
6502 (invar_2))
6503 with possible degeneracies.
6505 The invariant expressions must each be of a form that can be used as a
6506 machine operand. We surround them with a USE rtx (a hack, but localized
6507 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6508 routine; it is the caller's responsibility to strip them.
6510 If no such canonicalization is possible (i.e., two bivs are used or an
6511 expression that is neither invariant nor a biv nor a giv), this routine
6512 returns 0.
6514 For a nonzero return, the result will have a code of CONST_INT, USE,
6515 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6517 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
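/* Canonicalization example (hypothetical): for a biv B, the expression
   (B + 3) * 5 simplifies to

     (plus (mult B (const_int 5)) (const_int 15))

   i.e. invar_1 == 5 and invar_2 == 15 in the form shown above.  */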
6519 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6520 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6522 static rtx
6523 simplify_giv_expr (loop, x, ext_val, benefit)
6524 const struct loop *loop;
6525 rtx x;
6526 rtx *ext_val;
6527 int *benefit;
6529 struct loop_ivs *ivs = LOOP_IVS (loop);
6530 struct loop_regs *regs = LOOP_REGS (loop);
6531 enum machine_mode mode = GET_MODE (x);
6532 rtx arg0, arg1;
6533 rtx tem;
6535 /* If this is not an integer mode, or if we cannot do arithmetic in this
6536 mode, this can't be a giv. */
6537 if (mode != VOIDmode
6538 && (GET_MODE_CLASS (mode) != MODE_INT
6539 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6540 return NULL_RTX;
6542 switch (GET_CODE (x))
6544 case PLUS:
6545 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6546 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6547 if (arg0 == 0 || arg1 == 0)
6548 return NULL_RTX;
6550 /* Put constant last, CONST_INT last if both constant. */
6551 if ((GET_CODE (arg0) == USE
6552 || GET_CODE (arg0) == CONST_INT)
6553 && ! ((GET_CODE (arg0) == USE
6554 && GET_CODE (arg1) == USE)
6555 || GET_CODE (arg1) == CONST_INT))
6556 tem = arg0, arg0 = arg1, arg1 = tem;
6558 /* Handle addition of zero, then addition of an invariant. */
6559 if (arg1 == const0_rtx)
6560 return arg0;
6561 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6562 switch (GET_CODE (arg0))
6564 case CONST_INT:
6565 case USE:
6566 /* Adding two invariants must result in an invariant, so enclose
6567 the addition operation inside a USE and return it. */
6568 if (GET_CODE (arg0) == USE)
6569 arg0 = XEXP (arg0, 0);
6570 if (GET_CODE (arg1) == USE)
6571 arg1 = XEXP (arg1, 0);
6573 if (GET_CODE (arg0) == CONST_INT)
6574 tem = arg0, arg0 = arg1, arg1 = tem;
6575 if (GET_CODE (arg1) == CONST_INT)
6576 tem = sge_plus_constant (arg0, arg1);
6577 else
6578 tem = sge_plus (mode, arg0, arg1);
6580 if (GET_CODE (tem) != CONST_INT)
6581 tem = gen_rtx_USE (mode, tem);
6582 return tem;
6584 case REG:
6585 case MULT:
6586 /* biv + invar or mult + invar. Return sum. */
6587 return gen_rtx_PLUS (mode, arg0, arg1);
6589 case PLUS:
6590 /* (a + invar_1) + invar_2. Associate. */
6591 return
6592 simplify_giv_expr (loop,
6593 gen_rtx_PLUS (mode,
6594 XEXP (arg0, 0),
6595 gen_rtx_PLUS (mode,
6596 XEXP (arg0, 1),
6597 arg1)),
6598 ext_val, benefit);
6600 default:
6601 abort ();
6604 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6605 MULT to reduce cases. */
6606 if (GET_CODE (arg0) == REG)
6607 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6608 if (GET_CODE (arg1) == REG)
6609 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6611 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6612 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6613 Recurse to associate the second PLUS. */
6614 if (GET_CODE (arg1) == MULT)
6615 tem = arg0, arg0 = arg1, arg1 = tem;
6617 if (GET_CODE (arg1) == PLUS)
6618 return
6619 simplify_giv_expr (loop,
6620 gen_rtx_PLUS (mode,
6621 gen_rtx_PLUS (mode, arg0,
6622 XEXP (arg1, 0)),
6623 XEXP (arg1, 1)),
6624 ext_val, benefit);
6626 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6627 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6628 return NULL_RTX;
6630 if (! rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6631 return NULL_RTX;
6633 return simplify_giv_expr (loop,
6634 gen_rtx_MULT (mode,
6635 XEXP (arg0, 0),
6636 gen_rtx_PLUS (mode,
6637 XEXP (arg0, 1),
6638 XEXP (arg1, 1))),
6639 ext_val, benefit);
6641 case MINUS:
6642 /* Handle "a - b" as "a + b * (-1)". */
6643 return simplify_giv_expr (loop,
6644 gen_rtx_PLUS (mode,
6645 XEXP (x, 0),
6646 gen_rtx_MULT (mode,
6647 XEXP (x, 1),
6648 constm1_rtx)),
6649 ext_val, benefit);
6651 case MULT:
6652 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6653 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6654 if (arg0 == 0 || arg1 == 0)
6655 return NULL_RTX;
6657 /* Put constant last, CONST_INT last if both constant. */
6658 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6659 && GET_CODE (arg1) != CONST_INT)
6660 tem = arg0, arg0 = arg1, arg1 = tem;
6662 /* If second argument is not now constant, not giv. */
6663 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6664 return NULL_RTX;
6666 /* Handle multiply by 0 or 1. */
6667 if (arg1 == const0_rtx)
6668 return const0_rtx;
6670 else if (arg1 == const1_rtx)
6671 return arg0;
6673 switch (GET_CODE (arg0))
6675 case REG:
6676 /* biv * invar. Done. */
6677 return gen_rtx_MULT (mode, arg0, arg1);
6679 case CONST_INT:
6680 /* Product of two constants. */
6681 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6683 case USE:
6684 /* invar * invar is a giv, but attempt to simplify it somehow. */
6685 if (GET_CODE (arg1) != CONST_INT)
6686 return NULL_RTX;
6688 arg0 = XEXP (arg0, 0);
6689 if (GET_CODE (arg0) == MULT)
6691 /* (invar_0 * invar_1) * invar_2. Associate. */
6692 return simplify_giv_expr (loop,
6693 gen_rtx_MULT (mode,
6694 XEXP (arg0, 0),
6695 gen_rtx_MULT (mode,
6696 XEXP (arg0, 1),
6698 arg1)),
6699 ext_val, benefit);
6701 /* Propagate the MULT expressions to the innermost nodes. */
6702 else if (GET_CODE (arg0) == PLUS)
6704 /* (invar_0 + invar_1) * invar_2. Distribute. */
6705 return simplify_giv_expr (loop,
6706 gen_rtx_PLUS (mode,
6707 gen_rtx_MULT (mode,
6708 XEXP (arg0, 0),
6710 arg1),
6711 gen_rtx_MULT (mode,
6712 XEXP (arg0, 1),
6714 arg1)),
6715 ext_val, benefit);
6717 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6719 case MULT:
6720 /* (a * invar_1) * invar_2. Associate. */
6721 return simplify_giv_expr (loop,
6722 gen_rtx_MULT (mode,
6723 XEXP (arg0, 0),
6724 gen_rtx_MULT (mode,
6725 XEXP (arg0, 1),
6726 arg1)),
6727 ext_val, benefit);
6729 case PLUS:
6730 /* (a + invar_1) * invar_2. Distribute. */
6731 return simplify_giv_expr (loop,
6732 gen_rtx_PLUS (mode,
6733 gen_rtx_MULT (mode,
6734 XEXP (arg0, 0),
6735 arg1),
6736 gen_rtx_MULT (mode,
6737 XEXP (arg0, 1),
6738 arg1)),
6739 ext_val, benefit);
6741 default:
6742 abort ();
6745 case ASHIFT:
6746 /* Shift by constant is multiply by power of two. */
6747 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6748 return 0;
6750 return
6751 simplify_giv_expr (loop,
6752 gen_rtx_MULT (mode,
6753 XEXP (x, 0),
6754 GEN_INT ((HOST_WIDE_INT) 1
6755 << INTVAL (XEXP (x, 1)))),
6756 ext_val, benefit);
6758 case NEG:
6759 /* "-a" is "a * (-1)" */
6760 return simplify_giv_expr (loop,
6761 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6762 ext_val, benefit);
6764 case NOT:
6765 /* "~a" is "-a - 1". Silly, but easy. */
6766 return simplify_giv_expr (loop,
6767 gen_rtx_MINUS (mode,
6768 gen_rtx_NEG (mode, XEXP (x, 0)),
6769 const1_rtx),
6770 ext_val, benefit);
6772 case USE:
6773 /* Already in proper form for invariant. */
6774 return x;
6776 case SIGN_EXTEND:
6777 case ZERO_EXTEND:
6778 case TRUNCATE:
6779 /* Conditionally recognize extensions of simple IVs. After we've
6780 computed loop traversal counts and verified the range of the
6781 source IV, we'll reevaluate this as a GIV. */
6782 if (*ext_val == NULL_RTX)
6784 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6785 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6787 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6788 return arg0;
6791 goto do_default;
6793 case REG:
6794 /* If this is a new register, we can't deal with it. */
6795 if (REGNO (x) >= max_reg_before_loop)
6796 return 0;
6798 /* Check for biv or giv. */
6799 switch (REG_IV_TYPE (ivs, REGNO (x)))
6801 case BASIC_INDUCT:
6802 return x;
6803 case GENERAL_INDUCT:
6805 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6807 /* Form expression from giv and add benefit. Ensure this giv
6808 can derive another and subtract any needed adjustment if so. */
6810 /* Increasing the benefit here is risky. The only case in which it
6811 is arguably correct is if this is the only use of V. In other
6812 cases, this will artificially inflate the benefit of the current
6813 giv, and lead to suboptimal code. Thus, it is disabled, since
6814 potentially not reducing an only marginally beneficial giv is
6815 less harmful than reducing many givs that are not really
6816 beneficial. */
6818 rtx single_use = regs->array[REGNO (x)].single_usage;
6819 if (single_use && single_use != const0_rtx)
6820 *benefit += v->benefit;
6823 if (v->cant_derive)
6824 return 0;
6826 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6827 v->src_reg, v->mult_val),
6828 v->add_val);
6830 if (v->derive_adjustment)
6831 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6832 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6833 if (*ext_val)
6835 if (!v->ext_dependent)
6836 return arg0;
6838 else
6840 *ext_val = v->ext_dependent;
6841 return arg0;
6843 return 0;
6846 default:
6847 do_default:
6848 /* If it isn't an induction variable, and it is invariant, we
6849 may be able to simplify things further by looking through
6850 the bits we just moved outside the loop. */
6851 if (loop_invariant_p (loop, x) == 1)
6853 struct movable *m;
6854 struct loop_movables *movables = LOOP_MOVABLES (loop);
6856 for (m = movables->head; m; m = m->next)
6857 if (rtx_equal_p (x, m->set_dest))
6859 /* Ok, we found a match. Substitute and simplify. */
6861 /* If we match another movable, we must use that, as
6862 this one is going away. */
6863 if (m->match)
6864 return simplify_giv_expr (loop, m->match->set_dest,
6865 ext_val, benefit);
6867 /* If consec is nonzero, this is a member of a group of
6868 instructions that were moved together. We handle this
6869 case only to the point of seeking to the last insn and
6870 looking for a REG_EQUAL. Fail if we don't find one. */
6871 if (m->consec != 0)
6873 int i = m->consec;
6874 tem = m->insn;
6876 do
6877 tem = NEXT_INSN (tem);
6879 while (--i > 0);
6881 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6882 if (tem)
6883 tem = XEXP (tem, 0);
6885 else
6887 tem = single_set (m->insn);
6888 if (tem)
6889 tem = SET_SRC (tem);
6892 if (tem)
6894 /* What we are most interested in is pointer
6895 arithmetic on invariants -- only take
6896 patterns we may be able to do something with. */
6897 if (GET_CODE (tem) == PLUS
6898 || GET_CODE (tem) == MULT
6899 || GET_CODE (tem) == ASHIFT
6900 || GET_CODE (tem) == CONST_INT
6901 || GET_CODE (tem) == SYMBOL_REF)
6903 tem = simplify_giv_expr (loop, tem, ext_val,
6904 benefit);
6905 if (tem)
6906 return tem;
6908 else if (GET_CODE (tem) == CONST
6909 && GET_CODE (XEXP (tem, 0)) == PLUS
6910 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6911 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6913 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6914 ext_val, benefit);
6915 if (tem)
6916 return tem;
6919 break;
6922 break;
6925 /* Fall through to general case. */
6926 default:
6927 /* If invariant, return as USE (unless CONST_INT).
6928 Otherwise, not giv. */
6929 if (GET_CODE (x) == USE)
6930 x = XEXP (x, 0);
6932 if (loop_invariant_p (loop, x) == 1)
6934 if (GET_CODE (x) == CONST_INT)
6935 return x;
6936 if (GET_CODE (x) == CONST
6937 && GET_CODE (XEXP (x, 0)) == PLUS
6938 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6939 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6940 x = XEXP (x, 0);
6941 return gen_rtx_USE (mode, x);
6943 else
6944 return 0;
6948 /* This routine folds invariants such that there is only ever one
6949 CONST_INT in the summation. It is only used by simplify_giv_expr. */
6951 static rtx
6952 sge_plus_constant (x, c)
6953 rtx x, c;
6955 if (GET_CODE (x) == CONST_INT)
6956 return GEN_INT (INTVAL (x) + INTVAL (c));
6957 else if (GET_CODE (x) != PLUS)
6958 return gen_rtx_PLUS (GET_MODE (x), x, c);
6959 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6961 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6962 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6964 else if (GET_CODE (XEXP (x, 0)) == PLUS
6965 || GET_CODE (XEXP (x, 1)) != PLUS)
6967 return gen_rtx_PLUS (GET_MODE (x),
6968 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6970 else
6972 return gen_rtx_PLUS (GET_MODE (x),
6973 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
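/* E.g. (illustration): folding (plus (symbol_ref X) (const_int 4))
   with (const_int 3) yields (plus (symbol_ref X) (const_int 7)),
   keeping a single CONST_INT in the summation.  */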
6977 static rtx
6978 sge_plus (mode, x, y)
6979 enum machine_mode mode;
6980 rtx x, y;
6982 while (GET_CODE (y) == PLUS)
6984 rtx a = XEXP (y, 0);
6985 if (GET_CODE (a) == CONST_INT)
6986 x = sge_plus_constant (x, a);
6987 else
6988 x = gen_rtx_PLUS (mode, x, a);
6989 y = XEXP (y, 1);
6991 if (GET_CODE (y) == CONST_INT)
6992 x = sge_plus_constant (x, y);
6993 else
6994 x = gen_rtx_PLUS (mode, x, y);
6995 return x;
6998 /* Help detect a giv that is calculated by several consecutive insns;
6999 for example,
7000 giv = biv * M
7001 giv = giv + A
7002 The caller has already identified the first insn P as having a giv as dest;
7003 we check that all other insns that set the same register follow
7004 immediately after P, that they alter nothing else,
7005 and that the result of the last is still a giv.
7007 The value is 0 if the reg set in P is not really a giv.
7008 Otherwise, the value is the amount gained by eliminating
7009 all the consecutive insns that compute the value.
7011 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
7012 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
7014 The coefficients of the ultimate giv value are stored in
7015 *MULT_VAL and *ADD_VAL. */
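/* Worked instance (hypothetical insns): for the consecutive pair

     r = i * 4;
     r = r + 8;

   the ultimate giv value is i * 4 + 8, so *MULT_VAL == 4 and
   *ADD_VAL == 8, and the returned benefit covers both insns.  */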
7017 static int
7018 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
7019 add_val, mult_val, ext_val, last_consec_insn)
7020 const struct loop *loop;
7021 int first_benefit;
7022 rtx p;
7023 rtx src_reg;
7024 rtx dest_reg;
7025 rtx *add_val;
7026 rtx *mult_val;
7027 rtx *ext_val;
7028 rtx *last_consec_insn;
7030 struct loop_ivs *ivs = LOOP_IVS (loop);
7031 struct loop_regs *regs = LOOP_REGS (loop);
7032 int count;
7033 enum rtx_code code;
7034 int benefit;
7035 rtx temp;
7036 rtx set;
7038 /* Indicate that this is a giv so that we can update the value produced in
7039 each insn of the multi-insn sequence.
7041 This induction structure will be used only by the call to
7042 general_induction_var below, so we can allocate it on our stack.
7043 If this is a giv, our caller will replace the induct var entry with
7044 a new induction structure. */
7045 struct induction *v;
7047 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7048 return 0;
7050 v = (struct induction *) alloca (sizeof (struct induction));
7051 v->src_reg = src_reg;
7052 v->mult_val = *mult_val;
7053 v->add_val = *add_val;
7054 v->benefit = first_benefit;
7055 v->cant_derive = 0;
7056 v->derive_adjustment = 0;
7057 v->ext_dependent = NULL_RTX;
7059 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7060 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7062 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7064 while (count > 0)
7066 p = NEXT_INSN (p);
7067 code = GET_CODE (p);
7069 /* If libcall, skip to end of call sequence. */
7070 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7071 p = XEXP (temp, 0);
7073 if (code == INSN
7074 && (set = single_set (p))
7075 && GET_CODE (SET_DEST (set)) == REG
7076 && SET_DEST (set) == dest_reg
7077 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7078 add_val, mult_val, ext_val, 0,
7079 &benefit, VOIDmode)
7080 /* Giv created by equivalent expression. */
7081 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7082 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7083 add_val, mult_val, ext_val, 0,
7084 &benefit, VOIDmode)))
7085 && src_reg == v->src_reg)
7087 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7088 benefit += libcall_benefit (p);
7090 count--;
7091 v->mult_val = *mult_val;
7092 v->add_val = *add_val;
7093 v->benefit += benefit;
7095 else if (code != NOTE)
7097 /* Allow insns that set something other than this giv to a
7098 constant. Such insns are needed on machines which cannot
7099 include long constants and should not disqualify a giv. */
7100 if (code == INSN
7101 && (set = single_set (p))
7102 && SET_DEST (set) != dest_reg
7103 && CONSTANT_P (SET_SRC (set)))
7104 continue;
7106 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7107 return 0;
7111 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7112 *last_consec_insn = p;
7113 return v->benefit;
7116 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7117 represented by G1. If no such expression can be found, or it is clear that
7118 it cannot possibly be a valid address, 0 is returned.
7120 To perform the computation, we note that
7121 G1 = x * v + a and
7122 G2 = y * v + b
7123 where `v' is the biv.
7125 So G2 = (y/x) * G1 + (b - a*y/x).
7127 Note that MULT = y/x.
7129 Update: A and B are now allowed to be additive expressions such that
7130 B contains all variables in A. That is, computing B-A will not require
7131 subtracting variables. */
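/* Numeric check of the formula above (made-up values): with
   G1 = 2*v + 1 and G2 = 6*v + 10, MULT = y/x = 3 and
   G2 = 3*G1 + (10 - 1*3) = 3*G1 + 7.  */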
7133 static rtx
7134 express_from_1 (a, b, mult)
7135 rtx a, b, mult;
7137 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7139 if (mult == const0_rtx)
7140 return b;
7142 /* If MULT is not 1, we cannot handle A with non-constants, since we
7143 would then be required to subtract multiples of the registers in A.
7144 This is theoretically possible, and may even apply to some Fortran
7145 constructs, but it is a lot of work and we do not attempt it here. */
7147 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7148 return NULL_RTX;
7150 /* In general these structures are sorted top to bottom (down the PLUS
7151 chain), but not left to right across the PLUS. If B is a higher
7152 order giv than A, we can strip one level and recurse. If A is higher
7153 order, we'll eventually bail out, but won't know that until the end.
7154 If they are the same, we'll strip one level around this loop. */
7156 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7158 rtx ra, rb, oa, ob, tmp;
7160 ra = XEXP (a, 0), oa = XEXP (a, 1);
7161 if (GET_CODE (ra) == PLUS)
7162 tmp = ra, ra = oa, oa = tmp;
7164 rb = XEXP (b, 0), ob = XEXP (b, 1);
7165 if (GET_CODE (rb) == PLUS)
7166 tmp = rb, rb = ob, ob = tmp;
7168 if (rtx_equal_p (ra, rb))
7169 /* We matched: remove one reg completely. */
7170 a = oa, b = ob;
7171 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7172 /* An alternate match. */
7173 a = oa, b = rb;
7174 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7175 /* An alternate match. */
7176 a = ra, b = ob;
7177 else
7179 /* Indicates an extra register in B. Strip one level from B and
7180 recurse, hoping B was the higher order expression. */
7181 ob = express_from_1 (a, ob, mult);
7182 if (ob == NULL_RTX)
7183 return NULL_RTX;
7184 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7188 /* Here we are at the last level of A, go through the cases hoping to
7189 get rid of everything but a constant. */
7191 if (GET_CODE (a) == PLUS)
7193 rtx ra, oa;
7195 ra = XEXP (a, 0), oa = XEXP (a, 1);
7196 if (rtx_equal_p (oa, b))
7197 oa = ra;
7198 else if (!rtx_equal_p (ra, b))
7199 return NULL_RTX;
7201 if (GET_CODE (oa) != CONST_INT)
7202 return NULL_RTX;
7204 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7206 else if (GET_CODE (a) == CONST_INT)
7208 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7210 else if (CONSTANT_P (a))
7212 enum machine_mode mode_a = GET_MODE (a);
7213 enum machine_mode mode_b = GET_MODE (b);
7214 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7215 return simplify_gen_binary (MINUS, mode, b, a);
7217 else if (GET_CODE (b) == PLUS)
7219 if (rtx_equal_p (a, XEXP (b, 0)))
7220 return XEXP (b, 1);
7221 else if (rtx_equal_p (a, XEXP (b, 1)))
7222 return XEXP (b, 0);
7223 else
7224 return NULL_RTX;
7226 else if (rtx_equal_p (a, b))
7227 return const0_rtx;
7229 return NULL_RTX;
7232 static rtx
7233 express_from (g1, g2)
7234 struct induction *g1, *g2;
7236 rtx mult, add;
7238 /* The value that G1 will be multiplied by must be a constant integer. Also,
7239 the only chance we have of getting a valid address is if y/x (see above
7240 for notation) is also an integer. */
7241 if (GET_CODE (g1->mult_val) == CONST_INT
7242 && GET_CODE (g2->mult_val) == CONST_INT)
7244 if (g1->mult_val == const0_rtx
7245 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7246 return NULL_RTX;
7247 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7249 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7250 mult = const1_rtx;
7251 else
7253 /* ??? Find out if one is a multiple of the other? */
7254 return NULL_RTX;
7257 add = express_from_1 (g1->add_val, g2->add_val, mult);
7258 if (add == NULL_RTX)
7260 /* Failed. If we've got a multiplication factor between G1 and G2,
7261 scale G1's addend and try again. */
7262 if (INTVAL (mult) > 1)
7264 rtx g1_add_val = g1->add_val;
7265 if (GET_CODE (g1_add_val) == MULT
7266 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7268 HOST_WIDE_INT m;
7269 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7270 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7271 XEXP (g1_add_val, 0), GEN_INT (m));
7273 else
7275 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7276 mult);
7279 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7282 if (add == NULL_RTX)
7283 return NULL_RTX;
7285 /* Form simplified final result. */
7286 if (mult == const0_rtx)
7287 return add;
7288 else if (mult == const1_rtx)
7289 mult = g1->dest_reg;
7290 else
7291 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7293 if (add == const0_rtx)
7294 return mult;
7295 else
7297 if (GET_CODE (add) == PLUS
7298 && CONSTANT_P (XEXP (add, 1)))
7300 rtx tem = XEXP (add, 1);
7301 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7302 add = tem;
7305 return gen_rtx_PLUS (g2->mode, mult, add);
7309 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7310 represented by G1. This indicates that G2 should be combined with G1 and
7311 that G2 can use (either directly or via an address expression) a register
7312 used to represent G1. */
7314 static rtx
7315 combine_givs_p (g1, g2)
7316 struct induction *g1, *g2;
7318 rtx comb, ret;
7320 /* With the introduction of ext dependent givs, we must be careful about modes.
7321 G2 must not use a wider mode than G1. */
7322 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7323 return NULL_RTX;
7325 ret = comb = express_from (g1, g2);
7326 if (comb == NULL_RTX)
7327 return NULL_RTX;
7328 if (g1->mode != g2->mode)
7329 ret = gen_lowpart (g2->mode, comb);
7331 /* If these givs are identical, they can be combined. We use the results
7332 of express_from because the addends are not in a canonical form, so
7333 rtx_equal_p is a weaker test. */
7334 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7335 combination to be the other way round. */
7336 if (comb == g1->dest_reg
7337 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7339 return ret;
7342 /* If G2 can be expressed as a function of G1 and that function is valid
7343 as an address and no more expensive than using a register for G2,
7344 the expression of G2 in terms of G1 can be used. */
7345 if (ret != NULL_RTX
7346 && g2->giv_type == DEST_ADDR
7347 && memory_address_p (GET_MODE (g2->mem), ret)
7348 /* ??? Loses, especially with -fforce-addr, where *g2->location
7349 will always be a register, and so anything more complicated
7350 gets discarded. */
7351 #if 0
7352 #ifdef ADDRESS_COST
7353 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7354 #else
7355 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7356 #endif
7357 #endif
7360 return ret;
7363 return NULL_RTX;
7366 /* Check each extension dependent giv in this class to see if its
7367 root biv is safe from wrapping in the interior mode, which would
7368 make the giv illegal. */
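/* Numeric illustration (assumed values): a QImode biv starting at 250
   with increment 1 over 10 iterations reaches 260, which exceeds the
   unsigned QImode maximum of 255.  Zero extension of that biv could
   therefore wrap, so its ZERO_EXTEND dependent givs are invalidated
   below.  */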
7370 static void
7371 check_ext_dependent_givs (bl, loop_info)
7372 struct iv_class *bl;
7373 struct loop_info *loop_info;
7375 int ze_ok = 0, se_ok = 0, info_ok = 0;
7376 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7377 HOST_WIDE_INT start_val;
7378 unsigned HOST_WIDE_INT u_end_val = 0;
7379 unsigned HOST_WIDE_INT u_start_val = 0;
7380 rtx incr = pc_rtx;
7381 struct induction *v;
7383 /* Make sure the iteration data is available. We must have
7384 constants in order to be certain of no overflow. */
7385 /* ??? An unknown iteration count with an increment of +-1
7386 combined with friendly exit tests against an invariant
7387 value is also amenable to optimization. Not implemented. */
7388 if (loop_info->n_iterations > 0
7389 && bl->initial_value
7390 && GET_CODE (bl->initial_value) == CONST_INT
7391 && (incr = biv_total_increment (bl))
7392 && GET_CODE (incr) == CONST_INT
7393 /* Make sure the host can represent the arithmetic. */
7394 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7396 unsigned HOST_WIDE_INT abs_incr, total_incr;
7397 HOST_WIDE_INT s_end_val;
7398 int neg_incr;
7400 info_ok = 1;
7401 start_val = INTVAL (bl->initial_value);
7402 u_start_val = start_val;
7404 neg_incr = 0, abs_incr = INTVAL (incr);
7405 if (INTVAL (incr) < 0)
7406 neg_incr = 1, abs_incr = -abs_incr;
7407 total_incr = abs_incr * loop_info->n_iterations;
7409 /* Check for host arithmetic overflow. */
7410 if (total_incr / loop_info->n_iterations == abs_incr)
7412 unsigned HOST_WIDE_INT u_max;
7413 HOST_WIDE_INT s_max;
7415 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7416 s_end_val = u_end_val;
7417 u_max = GET_MODE_MASK (biv_mode);
7418 s_max = u_max >> 1;
7420 /* Check that zero extension of the biv is ok. */
7421 if (start_val >= 0
7422 /* Check for host arithmetic overflow. */
7423 && (neg_incr
7424 ? u_end_val < u_start_val
7425 : u_end_val > u_start_val)
7426 /* Check for target arithmetic overflow. */
7427 && (neg_incr
7428 ? 1 /* taken care of with host overflow */
7429 : u_end_val <= u_max))
7431 ze_ok = 1;
7434 /* Check that sign extension of the biv is ok. */
7435 /* ??? While it is true that overflow with signed and pointer
7436 arithmetic is undefined, I fear too many programmers don't
7437 keep this fact in mind -- myself included on occasion.
7438 So be cautious with the signed overflow optimizations. */
7439 if (start_val >= -s_max - 1
7440 /* Check for host arithmetic overflow. */
7441 && (neg_incr
7442 ? s_end_val < start_val
7443 : s_end_val > start_val)
7444 /* Check for target arithmetic overflow. */
7445 && (neg_incr
7446 ? s_end_val >= -s_max - 1
7447 : s_end_val <= s_max))
7449 se_ok = 1;
7454 /* Invalidate givs that fail the tests. */
7455 for (v = bl->giv; v; v = v->next_iv)
7456 if (v->ext_dependent)
7458 enum rtx_code code = GET_CODE (v->ext_dependent);
7459 int ok = 0;
7461 switch (code)
7463 case SIGN_EXTEND:
7464 ok = se_ok;
7465 break;
7466 case ZERO_EXTEND:
7467 ok = ze_ok;
7468 break;
7470 case TRUNCATE:
7471 /* We don't know whether this value is being used as either
7472 signed or unsigned, so to safely truncate we must satisfy
7473 both. The initial check here verifies the BIV itself;
7474 once that is successful we may check its range wrt the
7475 derived GIV. */
7476 if (se_ok && ze_ok)
7478 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7479 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7481 /* We know from the above that both endpoints are nonnegative,
7482 and that there is no wrapping. Verify that both endpoints
7483 are within the (signed) range of the outer mode. */
7484 if (u_start_val <= max && u_end_val <= max)
7485 ok = 1;
7487 break;
7489 default:
7490 abort ();
7493 if (ok)
7495 if (loop_dump_stream)
7497 fprintf (loop_dump_stream,
7498 "Verified ext dependent giv at %d of reg %d\n",
7499 INSN_UID (v->insn), bl->regno);
7502 else
7504 if (loop_dump_stream)
7506 const char *why;
7508 if (info_ok)
7509 why = "biv iteration values overflowed";
7510 else
7512 if (incr == pc_rtx)
7513 incr = biv_total_increment (bl);
7514 if (incr == const1_rtx)
7515 why = "biv iteration info incomplete; incr by 1";
7516 else
7517 why = "biv iteration info incomplete";
7520 fprintf (loop_dump_stream,
7521 "Failed ext dependent giv at %d, %s\n",
7522 INSN_UID (v->insn), why);
7524 v->ignore = 1;
7525 bl->all_reduced = 0;
7530 /* Generate a version of VALUE in a mode appropriate for initializing V. */
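/* For example (hypothetical modes): for a giv that sign-extends a
   HImode biv to SImode, a VOIDmode constant is usable as-is, while a
   HImode register value becomes (sign_extend:SI (reg:HI ...)).  */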
7532 rtx
7533 extend_value_for_giv (v, value)
7534 struct induction *v;
7535 rtx value;
7537 rtx ext_dep = v->ext_dependent;
7539 if (! ext_dep)
7540 return value;
7542 /* Recall that check_ext_dependent_givs verified that the known bounds
7543 of a biv did not overflow or wrap with respect to the extension for
7544 the giv. Therefore, constants need no additional adjustment. */
7545 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7546 return value;
7548 /* Otherwise, we must adjust the value to compensate for the
7549 differing modes of the biv and the giv. */
7550 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7553 struct combine_givs_stats
7555 int giv_number;
7556 int total_benefit;
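/* qsort comparison function for combine_givs: sort by decreasing
   total_benefit, breaking ties by giv_number to keep the ordering
   stable across qsort implementations.  */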
7559 static int
7560 cmp_combine_givs_stats (xp, yp)
7561 const PTR xp;
7562 const PTR yp;
7564 const struct combine_givs_stats * const x =
7565 (const struct combine_givs_stats *) xp;
7566 const struct combine_givs_stats * const y =
7567 (const struct combine_givs_stats *) yp;
7568 int d;
7569 d = y->total_benefit - x->total_benefit;
7570 /* Stabilize the sort. */
7571 if (!d)
7572 d = x->giv_number - y->giv_number;
7573 return d;
7576 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7577 any other. If so, point SAME to the giv combined with and set NEW_REG to
7578 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7579 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
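/* The algorithm below is greedy: givs are sorted by decreasing total
   potential benefit, the best candidate absorbs every giv it can be
   combined with, and the sort is then restarted so that the remaining
   benefits reflect the combinations already made.  */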
7581 static void
7582 combine_givs (regs, bl)
7583 struct loop_regs *regs;
7584 struct iv_class *bl;
7586 /* Additional benefit to add for being combined multiple times. */
7587 const int extra_benefit = 3;
7589 struct induction *g1, *g2, **giv_array;
7590 int i, j, k, giv_count;
7591 struct combine_givs_stats *stats;
7592 rtx *can_combine;
7594 /* Count givs, because bl->giv_count is incorrect here. */
7595 giv_count = 0;
7596 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7597 if (!g1->ignore)
7598 giv_count++;
7600 giv_array
7601 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7602 i = 0;
7603 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7604 if (!g1->ignore)
7605 giv_array[i++] = g1;
7607 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7608 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7610 for (i = 0; i < giv_count; i++)
7612 int this_benefit;
7613 rtx single_use;
7615 g1 = giv_array[i];
7616 stats[i].giv_number = i;
7618 /* If a DEST_REG GIV is used only once, do not allow it to combine
7619 with anything, for in doing so we will gain nothing that cannot
7620 be had by simply letting the GIV with which we would have combined
7621 be reduced on its own. The lossage shows up in particular with
7622 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7623 be seen elsewhere as well. */
7624 if (g1->giv_type == DEST_REG
7625 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7626 && single_use != const0_rtx)
7627 continue;
7629 this_benefit = g1->benefit;
7630 /* Add an additional weight for zero addends. */
7631 if (g1->no_const_addval)
7632 this_benefit += 1;
7634 for (j = 0; j < giv_count; j++)
7636 rtx this_combine;
7638 g2 = giv_array[j];
7639 if (g1 != g2
7640 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7642 can_combine[i * giv_count + j] = this_combine;
7643 this_benefit += g2->benefit + extra_benefit;
7646 stats[i].total_benefit = this_benefit;
7649 /* Iterate, combining until we can't. */
7650 restart:
7651 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7653 if (loop_dump_stream)
7655 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7656 for (k = 0; k < giv_count; k++)
7658 g1 = giv_array[stats[k].giv_number];
7659 if (!g1->combined_with && !g1->same)
7660 fprintf (loop_dump_stream, " {%d, %d}",
7661 INSN_UID (giv_array[stats[k].giv_number]->insn),
7662 stats[k].total_benefit);
7664 putc ('\n', loop_dump_stream);
7667 for (k = 0; k < giv_count; k++)
7669 int g1_add_benefit = 0;
7671 i = stats[k].giv_number;
7672 g1 = giv_array[i];
7674 /* If it has already been combined, skip. */
7675 if (g1->combined_with || g1->same)
7676 continue;
7678 for (j = 0; j < giv_count; j++)
7680 g2 = giv_array[j];
7681 if (g1 != g2 && can_combine[i * giv_count + j]
7682 /* If it has already been combined, skip. */
7683 && ! g2->same && ! g2->combined_with)
7685 int l;
7687 g2->new_reg = can_combine[i * giv_count + j];
7688 g2->same = g1;
7689 /* For the destination, we may now replace a register by a mem
7690 expression. This changes the costs considerably, so add the
7691 compensation. */
7692 if (g2->giv_type == DEST_ADDR)
7693 g2->benefit = (g2->benefit + reg_address_cost
7694 - address_cost (g2->new_reg,
7695 GET_MODE (g2->mem)));
7696 g1->combined_with++;
7697 g1->lifetime += g2->lifetime;
7699 g1_add_benefit += g2->benefit;
7701 /* ??? The new final_[bg]iv_value code does a much better job
7702 of finding replaceable giv's, and hence this code may no
7703 longer be necessary. */
7704 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7705 g1_add_benefit -= copy_cost;
7707 /* To help optimize the next set of combinations, remove
7708 this giv from the benefits of other potential mates. */
7709 for (l = 0; l < giv_count; ++l)
7711 int m = stats[l].giv_number;
7712 if (can_combine[m * giv_count + j])
7713 stats[l].total_benefit -= g2->benefit + extra_benefit;
7716 if (loop_dump_stream)
7717 fprintf (loop_dump_stream,
7718 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7719 INSN_UID (g2->insn), INSN_UID (g1->insn),
7720 g1->benefit, g1_add_benefit, g1->lifetime);
7724 /* To help optimize the next set of combinations, remove
7725 this giv from the benefits of other potential mates. */
7726 if (g1->combined_with)
7728 for (j = 0; j < giv_count; ++j)
7730 int m = stats[j].giv_number;
7731 if (can_combine[m * giv_count + i])
7732 stats[j].total_benefit -= g1->benefit + extra_benefit;
7735 g1->benefit += g1_add_benefit;
7737 /* We've finished with this giv, and everything it touched.
7738 Restart the combination so that proper weights for the
7739 rest of the givs are properly taken into account. */
7740 /* ??? Ideally we would compact the arrays at this point, so
7741 as to not cover old ground. But sanely compacting
7742 can_combine is tricky. */
7743 goto restart;
7747 /* Clean up. */
7748 free (stats);
7749 free (can_combine);
7752 /* Generate sequence for REG = B * M + A. */
7754 static rtx
7755 gen_add_mult (b, m, a, reg)
7756 rtx b; /* initial value of basic induction variable */
7757 rtx m; /* multiplicative constant */
7758 rtx a; /* additive constant */
7759 rtx reg; /* destination register */
7761 rtx seq;
7762 rtx result;
7764 start_sequence ();
7765 /* Use unsigned arithmetic. */
7766 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7767 if (reg != result)
7768 emit_move_insn (reg, result);
7769 seq = get_insns ();
7770 end_sequence ();
7772 return seq;
7776 /* Update registers created in insn sequence SEQ. */
7778 static void
7779 loop_regs_update (loop, seq)
7780 const struct loop *loop ATTRIBUTE_UNUSED;
7781 rtx seq;
7783 rtx insn;
7785 /* Update register info for alias analysis. */
7787 if (seq == NULL_RTX)
7788 return;
7790 if (INSN_P (seq))
7792 insn = seq;
7793 while (insn != NULL_RTX)
7795 rtx set = single_set (insn);
7797 if (set && GET_CODE (SET_DEST (set)) == REG)
7798 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7800 insn = NEXT_INSN (insn);
7803 else if (GET_CODE (seq) == SET
7804 && GET_CODE (SET_DEST (seq)) == REG)
7805 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7809 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7811 void
7812 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7813 const struct loop *loop;
7814 rtx b; /* initial value of basic induction variable */
7815 rtx m; /* multiplicative constant */
7816 rtx a; /* additive constant */
7817 rtx reg; /* destination register */
7818 basic_block before_bb;
7819 rtx before_insn;
7821 rtx seq;
7823 if (! before_insn)
7825 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7826 return;
7829 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7830 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7832 /* Increase the lifetime of any invariants moved further in code. */
7833 update_reg_last_use (a, before_insn);
7834 update_reg_last_use (b, before_insn);
7835 update_reg_last_use (m, before_insn);
7837 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7839 /* It is possible that the expansion created lots of new registers.
7840 Iterate over the sequence we just created and record them all. */
7841 loop_regs_update (loop, seq);
7845 /* Emit insns after the loop (at the loop sink) to set REG = B * M + A. */
7847 void
7848 loop_iv_add_mult_sink (loop, b, m, a, reg)
7849 const struct loop *loop;
7850 rtx b; /* initial value of basic induction variable */
7851 rtx m; /* multiplicative constant */
7852 rtx a; /* additive constant */
7853 rtx reg; /* destination register */
7855 rtx seq;
7857 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7858 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7860 /* Increase the lifetime of any invariants moved further in code.
7861 ???? Is this really necessary? */
7862 update_reg_last_use (a, loop->sink);
7863 update_reg_last_use (b, loop->sink);
7864 update_reg_last_use (m, loop->sink);
7866 loop_insn_sink (loop, seq);
7868 /* It is possible that the expansion created lots of new registers.
7869 Iterate over the sequence we just created and record them all. */
7870 loop_regs_update (loop, seq);
7874 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7876 void
7877 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7878 const struct loop *loop;
7879 rtx b; /* initial value of basic induction variable */
7880 rtx m; /* multiplicative constant */
7881 rtx a; /* additive constant */
7882 rtx reg; /* destination register */
7884 rtx seq;
7886 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7887 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7889 loop_insn_hoist (loop, seq);
7891 /* It is possible that the expansion created lots of new registers.
7892 Iterate over the sequence we just created and record them all. */
7893 loop_regs_update (loop, seq);
7898 /* Similar to gen_add_mult, but compute the cost rather than generating
7899 the sequence. */
7901 static int
7902 iv_add_mult_cost (b, m, a, reg)
7903 rtx b; /* initial value of basic induction variable */
7904 rtx m; /* multiplicative constant */
7905 rtx a; /* additive constant */
7906 rtx reg; /* destination register */
7908 int cost = 0;
7909 rtx last, result;
7911 start_sequence ();
7912 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7913 if (reg != result)
7914 emit_move_insn (reg, result);
7915 last = get_last_insn ();
7916 while (last)
7918 rtx t = single_set (last);
7919 if (t)
7920 cost += rtx_cost (SET_SRC (t), SET);
7921 last = PREV_INSN (last);
7923 end_sequence ();
7924 return cost;
7927 /* Test whether A * B can be computed without
7928 an actual multiply insn. Value is 1 if so.
7930 ??? This function stinks because it generates a ton of wasted RTL
7931 ??? and as a result fragments GC memory to no end. There are other
7932 ??? places in the compiler which are invoked a lot and do the same
7933 ??? thing, generate wasted RTL just to see if something is possible. */
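/* For example (illustrative): a multiply by 5 typically expands to
   shift-and-add insns (x*5 == (x<<2) + x), which the test below counts
   as cheap, while a multiply by a constant with no short shift-add
   decomposition yields a real MULT or a long sequence and is
   rejected.  */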
7935 static int
7936 product_cheap_p (a, b)
7937 rtx a;
7938 rtx b;
7940 rtx tmp;
7941 int win, n_insns;
7943 /* If only one is constant, make it B. */
7944 if (GET_CODE (a) == CONST_INT)
7945 tmp = a, a = b, b = tmp;
7947 /* If the first is constant, both are constant, so we don't need a multiply. */
7948 if (GET_CODE (a) == CONST_INT)
7949 return 1;
7951 /* If the second is not constant, neither is, so we would need a multiply. */
7952 if (GET_CODE (b) != CONST_INT)
7953 return 0;
7955 /* One operand is constant, so we might not need a multiply insn. Generate the
7956 code for the multiply and see if a call, a multiply, or a long sequence
7957 of insns is generated. */
7959 start_sequence ();
7960 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7961 tmp = get_insns ();
7962 end_sequence ();
7964 win = 1;
7965 if (INSN_P (tmp))
7967 n_insns = 0;
7968 while (tmp != NULL_RTX)
7970 rtx next = NEXT_INSN (tmp);
7972 if (++n_insns > 3
7973 || GET_CODE (tmp) != INSN
7974 || (GET_CODE (PATTERN (tmp)) == SET
7975 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7976 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7977 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7978 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7980 win = 0;
7981 break;
7984 tmp = next;
7987 else if (GET_CODE (tmp) == SET
7988 && GET_CODE (SET_SRC (tmp)) == MULT)
7989 win = 0;
7990 else if (GET_CODE (tmp) == PARALLEL
7991 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7992 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7993 win = 0;
7995 return win;
7998 /* Check to see if loop can be terminated by a "decrement and branch until
7999 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
8000 Also try reversing an increment loop to a decrement loop
8001 to see if the optimization can be performed.
8002 Value is nonzero if optimization was performed. */
8004 /* This is useful even if the architecture doesn't have such an insn,
8005 because it might change a loop which increments from 0 to n to a loop
8006 which decrements from n to 0. A loop that decrements to zero is usually
8007 faster than one that increments from zero. */
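/* For example, when I is used only to count iterations, a loop such as
       for (i = 0; i < n; i++) body;
   can be rewritten as
       for (i = n; --i >= 0; ) body;
   exposing a decrement-and-branch-until-zero opportunity.  */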
8009 /* ??? This could be rewritten to use some of the loop unrolling procedures,
8010 such as approx_final_value, biv_total_increment, loop_iterations, and
8011 final_[bg]iv_value. */
8013 static int
8014 check_dbra_loop (loop, insn_count)
8015 struct loop *loop;
8016 int insn_count;
8018 struct loop_info *loop_info = LOOP_INFO (loop);
8019 struct loop_regs *regs = LOOP_REGS (loop);
8020 struct loop_ivs *ivs = LOOP_IVS (loop);
8021 struct iv_class *bl;
8022 rtx reg;
8023 rtx jump_label;
8024 rtx final_value;
8025 rtx start_value;
8026 rtx new_add_val;
8027 rtx comparison;
8028 rtx before_comparison;
8029 rtx p;
8030 rtx jump;
8031 rtx first_compare;
8032 int compare_and_branch;
8033 rtx loop_start = loop->start;
8034 rtx loop_end = loop->end;
8036 /* If last insn is a conditional branch, and the insn before tests a
8037 register value, try to optimize it. Otherwise, we can't do anything. */
8039 jump = PREV_INSN (loop_end);
8040 comparison = get_condition_for_loop (loop, jump);
8041 if (comparison == 0)
8042 return 0;
8043 if (!onlyjump_p (jump))
8044 return 0;
8046 /* Try to compute whether the compare/branch at the loop end is one or
8047 two instructions. */
8048 get_condition (jump, &first_compare);
8049 if (first_compare == jump)
8050 compare_and_branch = 1;
8051 else if (first_compare == prev_nonnote_insn (jump))
8052 compare_and_branch = 2;
8053 else
8054 return 0;
8057 /* If more than one condition is present to control the loop, then
8058 do not proceed, as this function does not know how to rewrite
8059 loop tests with more than one condition.
8061 Look backwards from the first insn in the last comparison
8062 sequence and see if we've got another comparison sequence. */
8064 rtx jump1;
8065 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8066 if (GET_CODE (jump1) == JUMP_INSN)
8067 return 0;
8070 /* Check all of the bivs to see if the compare uses one of them.
8071 Skip bivs set more than once because we can't guarantee that
8072 it will be zero on the last iteration. Also skip if the biv is
8073 used between its update and the test insn. */
8075 for (bl = ivs->list; bl; bl = bl->next)
8077 if (bl->biv_count == 1
8078 && ! bl->biv->maybe_multiple
8079 && bl->biv->dest_reg == XEXP (comparison, 0)
8080 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8081 first_compare))
8082 break;
8085 if (! bl)
8086 return 0;
8088 /* Look for the case where the basic induction variable is always
8089 nonnegative, and equals zero on the last iteration.
8090 In this case, add a reg_note REG_NONNEG, which allows the
8091 m68k DBRA instruction to be used. */
8093 if (((GET_CODE (comparison) == GT
8094 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8095 && INTVAL (XEXP (comparison, 1)) == -1)
8096 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8097 && GET_CODE (bl->biv->add_val) == CONST_INT
8098 && INTVAL (bl->biv->add_val) < 0)
8100 /* Initial value must be greater than 0,
8101 and init_val % -dec_value == 0 to ensure that it equals zero on
8102 the last iteration. */
8104 if (GET_CODE (bl->initial_value) == CONST_INT
8105 && INTVAL (bl->initial_value) > 0
8106 && (INTVAL (bl->initial_value)
8107 % (-INTVAL (bl->biv->add_val))) == 0)
8109 /* Register is always nonnegative; add a REG_NONNEG note to the branch. */
8110 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8111 REG_NOTES (jump)
8112 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8113 REG_NOTES (jump));
8114 bl->nonneg = 1;
8116 return 1;
8119 /* If the decrement is 1 and the value was tested as >= 0 before
8120 the loop, then we can safely optimize. */
8121 for (p = loop_start; p; p = PREV_INSN (p))
8123 if (GET_CODE (p) == CODE_LABEL)
8124 break;
8125 if (GET_CODE (p) != JUMP_INSN)
8126 continue;
8128 before_comparison = get_condition_for_loop (loop, p);
8129 if (before_comparison
8130 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8131 && GET_CODE (before_comparison) == LT
8132 && XEXP (before_comparison, 1) == const0_rtx
8133 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8134 && INTVAL (bl->biv->add_val) == -1)
8136 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8137 REG_NOTES (jump)
8138 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8139 REG_NOTES (jump));
8140 bl->nonneg = 1;
8142 return 1;
8146 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8147 && INTVAL (bl->biv->add_val) > 0)
8149 /* Try to change inc to dec, so we can apply the above optimization. */
8150 /* Can do this if:
8151 all registers modified are induction variables or invariant,
8152 all memory references have non-overlapping addresses
8153 (obviously true if only one write)
8154 allow 2 insns for the compare/jump at the end of the loop. */
8155 /* Also, we must avoid any instructions which use both the reversed
8156 biv and another biv. Such instructions will fail if the loop is
8157 reversed. We meet this condition by requiring that either
8158 no_use_except_counting is true, or else that there is only
8159 one biv. */
8160 int num_nonfixed_reads = 0;
8161 /* 1 if the iteration var is used only to count iterations. */
8162 int no_use_except_counting = 0;
8163 /* 1 if the loop has no memory store, or it has a single memory store
8164 which is reversible. */
8165 int reversible_mem_store = 1;
8167 if (bl->giv_count == 0
8168 && !loop->exit_count
8169 && !loop_info->has_multiple_exit_targets)
8171 rtx bivreg = regno_reg_rtx[bl->regno];
8172 struct iv_class *blt;
8174 /* If there are no givs for this biv, and the only exit is the
8175 fall through at the end of the loop, then
8176 see if perhaps there are no uses except to count. */
8177 no_use_except_counting = 1;
8178 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8179 if (INSN_P (p))
8181 rtx set = single_set (p);
8183 if (set && GET_CODE (SET_DEST (set)) == REG
8184 && REGNO (SET_DEST (set)) == bl->regno)
8185 /* An insn that sets the biv is okay. */
8187 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8188 || p == prev_nonnote_insn (loop_end))
8189 && reg_mentioned_p (bivreg, PATTERN (p)))
8191 /* If either of these insns uses the biv and sets a pseudo
8192 that has more than one usage, then the biv has uses
8193 other than counting since it's used to derive a value
8194 that is used more than one time. */
8195 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8196 regs);
8197 if (regs->multiple_uses)
8199 no_use_except_counting = 0;
8200 break;
8203 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8205 no_use_except_counting = 0;
8206 break;
8210 /* A biv has uses besides counting if it is used to set
8211 another biv. */
8212 for (blt = ivs->list; blt; blt = blt->next)
8213 if (blt->init_set
8214 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8216 no_use_except_counting = 0;
8217 break;
8221 if (no_use_except_counting)
8222 /* No need to worry about MEMs. */
8224 else if (loop_info->num_mem_sets <= 1)
8226 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8227 if (INSN_P (p))
8228 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8230 /* If the loop has a single store, and the destination address is
8231 invariant, then we can't reverse the loop, because this address
8232 might then have the wrong value at loop exit.
8233 This would work if the source was invariant also, however, in that
8234 case, the insn should have been moved out of the loop. */
8236 if (loop_info->num_mem_sets == 1)
8238 struct induction *v;
8240 /* If we could prove that each of the memory locations
8241 written to was different, then we could reverse the
8242 store -- but we don't presently have any way of
8243 knowing that. */
8244 reversible_mem_store = 0;
8246 /* If the store depends on a register that is set after the
8247 store, it depends on the initial value, and is thus not
8248 reversible. */
8249 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8251 if (v->giv_type == DEST_REG
8252 && reg_mentioned_p (v->dest_reg,
8253 PATTERN (loop_info->first_loop_store_insn))
8254 && loop_insn_first_p (loop_info->first_loop_store_insn,
8255 v->insn))
8256 reversible_mem_store = 0;
8260 else
8261 return 0;
8263 /* This code only acts for innermost loops. Also it simplifies
8264 the memory address check by only reversing loops with
8265 zero or one memory access.
8266 Two memory accesses could involve parts of the same array,
8267 and that can't be reversed.
8268 If the biv is used only for counting, then we don't need to worry
8269 about all these things. */
8271 if ((num_nonfixed_reads <= 1
8272 && ! loop_info->has_nonconst_call
8273 && ! loop_info->has_prefetch
8274 && ! loop_info->has_volatile
8275 && reversible_mem_store
8276 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8277 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8278 && (bl == ivs->list && bl->next == 0))
8279 || (no_use_except_counting && ! loop_info->has_prefetch))
8281 rtx tem;
8283 /* Loop can be reversed. */
8284 if (loop_dump_stream)
8285 fprintf (loop_dump_stream, "Can reverse loop\n");
8287 /* Now check other conditions:
8289 The increment must be a constant, as must the initial value,
8290 and the comparison code must be LT.
8292 This test can probably be improved since +/- 1 in the constant
8293 can be obtained by changing LT to LE and vice versa; this is
8294 confusing. */
8296 if (comparison
8297 /* for constants, LE gets turned into LT */
8298 && (GET_CODE (comparison) == LT
8299 || (GET_CODE (comparison) == LE
8300 && no_use_except_counting)))
8302 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8303 rtx initial_value, comparison_value;
8304 int nonneg = 0;
8305 enum rtx_code cmp_code;
8306 int comparison_const_width;
8307 unsigned HOST_WIDE_INT comparison_sign_mask;
8309 add_val = INTVAL (bl->biv->add_val);
8310 comparison_value = XEXP (comparison, 1);
8311 if (GET_MODE (comparison_value) == VOIDmode)
8312 comparison_const_width
8313 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8314 else
8315 comparison_const_width
8316 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8317 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8318 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8319 comparison_sign_mask
8320 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8322 /* If the comparison value is not a loop invariant, then we
8323 cannot reverse this loop.
8325 ??? If the insns which initialize the comparison value as
8326 a whole compute an invariant result, then we could move
8327 them out of the loop and proceed with loop reversal. */
8328 if (! loop_invariant_p (loop, comparison_value))
8329 return 0;
8331 if (GET_CODE (comparison_value) == CONST_INT)
8332 comparison_val = INTVAL (comparison_value);
8333 initial_value = bl->initial_value;
8335 /* Normalize the initial value if it is an integer and
8336 has no other use except as a counter. This will allow
8337 a few more loops to be reversed. */
8338 if (no_use_except_counting
8339 && GET_CODE (comparison_value) == CONST_INT
8340 && GET_CODE (initial_value) == CONST_INT)
8342 comparison_val = comparison_val - INTVAL (bl->initial_value);
8343 /* The code below requires comparison_val to be a multiple
8344 of add_val in order to do the loop reversal, so
8345 round up comparison_val to a multiple of add_val.
8346 Since comparison_value is constant, we know that the
8347 current comparison code is LT. */
8348 comparison_val = comparison_val + add_val - 1;
8349 comparison_val
8350 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8351 /* We postpone overflow checks for COMPARISON_VAL here;
8352 even if there is an overflow, we might still be able to
8353 reverse the loop, if converting the loop exit test to
8354 NE is possible. */
8355 initial_value = const0_rtx;
8358 /* First check if we can do a vanilla loop reversal. */
8359 if (initial_value == const0_rtx
8360 /* If we have a decrement_and_branch_on_count,
8361 prefer the NE test, since this will allow that
8362 instruction to be generated. Note that we must
8363 use a vanilla loop reversal if the biv is used to
8364 calculate a giv or has a non-counting use. */
8365 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8366 && defined (HAVE_decrement_and_branch_on_count)
8367 && (! (add_val == 1 && loop->vtop
8368 && (bl->biv_count == 0
8369 || no_use_except_counting)))
8370 #endif
8371 && GET_CODE (comparison_value) == CONST_INT
8372 /* Now do postponed overflow checks on COMPARISON_VAL. */
8373 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8374 & comparison_sign_mask))
8376 /* Register will always be nonnegative, with value
8377 0 on last iteration */
8378 add_adjust = add_val;
8379 nonneg = 1;
8380 cmp_code = GE;
8382 else if (add_val == 1 && loop->vtop
8383 && (bl->biv_count == 0
8384 || no_use_except_counting))
8386 add_adjust = 0;
8387 cmp_code = NE;
8389 else
8390 return 0;
8392 if (GET_CODE (comparison) == LE)
8393 add_adjust -= add_val;
8395 /* If the initial value is not zero, or if the comparison
8396 value is not an exact multiple of the increment, then we
8397 cannot reverse this loop. */
8398 if (initial_value == const0_rtx
8399 && GET_CODE (comparison_value) == CONST_INT)
8401 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8402 return 0;
8404 else
8406 if (! no_use_except_counting || add_val != 1)
8407 return 0;
8410 final_value = comparison_value;
8412 /* Reset these in case we normalized the initial value
8413 and comparison value above. */
8414 if (GET_CODE (comparison_value) == CONST_INT
8415 && GET_CODE (initial_value) == CONST_INT)
8417 comparison_value = GEN_INT (comparison_val);
8418 final_value
8419 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8421 bl->initial_value = initial_value;
8423 /* Save some info needed to produce the new insns. */
8424 reg = bl->biv->dest_reg;
8425 jump_label = condjump_label (PREV_INSN (loop_end));
8426 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8428 /* Set start_value; if this is not a CONST_INT, we need
8429 to generate a SUB.
8430 Initialize biv to start_value before loop start.
8431 The old initializing insn will be deleted as a
8432 dead store by flow.c. */
8433 if (initial_value == const0_rtx
8434 && GET_CODE (comparison_value) == CONST_INT)
8436 start_value = GEN_INT (comparison_val - add_adjust);
8437 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8439 else if (GET_CODE (initial_value) == CONST_INT)
8441 enum machine_mode mode = GET_MODE (reg);
8442 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8443 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8445 if (add_insn == 0)
8446 return 0;
8448 start_value
8449 = gen_rtx_PLUS (mode, comparison_value, offset);
8450 loop_insn_hoist (loop, add_insn);
8451 if (GET_CODE (comparison) == LE)
8452 final_value = gen_rtx_PLUS (mode, comparison_value,
8453 GEN_INT (add_val));
8455 else if (! add_adjust)
8457 enum machine_mode mode = GET_MODE (reg);
8458 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8459 initial_value);
8461 if (sub_insn == 0)
8462 return 0;
8463 start_value
8464 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8465 loop_insn_hoist (loop, sub_insn);
8467 else
8468 /* We could handle the other cases too, but it'll be
8469 better to have a testcase first. */
8470 return 0;
8472 /* We may not have a single insn which can increment a reg, so
8473 create a sequence to hold all the insns from expand_inc. */
8474 start_sequence ();
8475 expand_inc (reg, new_add_val);
8476 tem = get_insns ();
8477 end_sequence ();
8479 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8480 delete_insn (bl->biv->insn);
8482 /* Update biv info to reflect its new status. */
8483 bl->biv->insn = p;
8484 bl->initial_value = start_value;
8485 bl->biv->add_val = new_add_val;
8487 /* Update loop info. */
8488 loop_info->initial_value = reg;
8489 loop_info->initial_equiv_value = reg;
8490 loop_info->final_value = const0_rtx;
8491 loop_info->final_equiv_value = const0_rtx;
8492 loop_info->comparison_value = const0_rtx;
8493 loop_info->comparison_code = cmp_code;
8494 loop_info->increment = new_add_val;
8496 /* Inc LABEL_NUSES so that delete_insn will
8497 not delete the label. */
8498 LABEL_NUSES (XEXP (jump_label, 0))++;
8500 /* Emit an insn after the end of the loop to set the biv's
8501 proper exit value if it is used anywhere outside the loop. */
8502 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8503 || ! bl->init_insn
8504 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8505 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8507 /* Delete compare/branch at end of loop. */
8508 delete_related_insns (PREV_INSN (loop_end));
8509 if (compare_and_branch == 2)
8510 delete_related_insns (first_compare);
8512 /* Add new compare/branch insn at end of loop. */
8513 start_sequence ();
8514 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8515 GET_MODE (reg), 0,
8516 XEXP (jump_label, 0));
8517 tem = get_insns ();
8518 end_sequence ();
8519 emit_jump_insn_before (tem, loop_end);
8521 for (tem = PREV_INSN (loop_end);
8522 tem && GET_CODE (tem) != JUMP_INSN;
8523 tem = PREV_INSN (tem))
8526 if (tem)
8527 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8529 if (nonneg)
8531 if (tem)
8533 /* Increment of LABEL_NUSES done above. */
8534 /* Register is now always nonnegative,
8535 so add REG_NONNEG note to the branch. */
8536 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8537 REG_NOTES (tem));
8539 bl->nonneg = 1;
8542 /* No insn may reference both the reversed and another biv or it
8543 will fail (see comment near the top of the loop reversal
8544 code).
8545 Earlier on, we have verified that the biv has no use except
8546 counting, or it is the only biv in this function.
8547 However, the code that computes no_use_except_counting does
8548 not verify reg notes. It's possible to have an insn that
8549 references another biv, and has a REG_EQUAL note with an
8550 expression based on the reversed biv. To avoid this case,
8551 remove all REG_EQUAL notes based on the reversed biv
8552 here. */
8553 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8554 if (INSN_P (p))
8556 rtx *pnote;
8557 rtx set = single_set (p);
8558 /* If this is a set of a GIV based on the reversed biv, any
8559 REG_EQUAL notes should still be correct. */
8560 if (! set
8561 || GET_CODE (SET_DEST (set)) != REG
8562 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8563 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8564 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8565 for (pnote = &REG_NOTES (p); *pnote;)
8567 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8568 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8569 XEXP (*pnote, 0)))
8570 *pnote = XEXP (*pnote, 1);
8571 else
8572 pnote = &XEXP (*pnote, 1);
8576 /* Mark that this biv has been reversed. Each giv which depends
8577 on this biv, and which is also live past the end of the loop
8578 will have to be fixed up. */
8580 bl->reversed = 1;
8582 if (loop_dump_stream)
8584 fprintf (loop_dump_stream, "Reversed loop");
8585 if (bl->nonneg)
8586 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8587 else
8588 fprintf (loop_dump_stream, "\n");
8591 return 1;
8596 return 0;
8599 /* Verify whether the biv BL appears to be eliminable,
8600 based on the insns in the loop that refer to it.
8602 If ELIMINATE_P is nonzero, actually do the elimination.
8604 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8605 determine whether invariant insns should be placed inside or at the
8606 start of the loop. */
8608 static int
8609 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8610 const struct loop *loop;
8611 struct iv_class *bl;
8612 int eliminate_p;
8613 int threshold, insn_count;
8615 struct loop_ivs *ivs = LOOP_IVS (loop);
8616 rtx reg = bl->biv->dest_reg;
8617 rtx p;
8619 /* Scan all insns in the loop, stopping if we find one that uses the
8620 biv in a way that we cannot eliminate. */
8622 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8624 enum rtx_code code = GET_CODE (p);
8625 basic_block where_bb = 0;
8626 rtx where_insn = threshold >= insn_count ? 0 : p;
8628 /* If this is a libcall that sets a giv, skip ahead to its end. */
8629 if (GET_RTX_CLASS (code) == 'i')
8631 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8633 if (note)
8635 rtx last = XEXP (note, 0);
8636 rtx set = single_set (last);
8638 if (set && GET_CODE (SET_DEST (set)) == REG)
8640 unsigned int regno = REGNO (SET_DEST (set));
8642 if (regno < ivs->n_regs
8643 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8644 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8645 p = last;
8649 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8650 && reg_mentioned_p (reg, PATTERN (p))
8651 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8652 eliminate_p, where_bb, where_insn))
8654 if (loop_dump_stream)
8655 fprintf (loop_dump_stream,
8656 "Cannot eliminate biv %d: biv used in insn %d.\n",
8657 bl->regno, INSN_UID (p));
8658 break;
8662 if (p == loop->end)
8664 if (loop_dump_stream)
8665 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8666 bl->regno, eliminate_p ? "was" : "can be");
8667 return 1;
8670 return 0;
8673 /* INSN and REFERENCE are instructions in the same insn chain.
8674 Return nonzero if INSN is first. */
8676 int
8677 loop_insn_first_p (insn, reference)
8678 rtx insn, reference;
8680 rtx p, q;
8682 for (p = insn, q = reference;;)
8684 /* Start with test for not first so that INSN == REFERENCE yields not
8685 first. */
8686 if (q == insn || ! p)
8687 return 0;
8688 if (p == reference || ! q)
8689 return 1;
8691 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8692 previous insn, hence the <= comparison below does not work if
8693 P is a note. */
8694 if (INSN_UID (p) < max_uid_for_loop
8695 && INSN_UID (q) < max_uid_for_loop
8696 && GET_CODE (p) != NOTE)
8697 return INSN_LUID (p) <= INSN_LUID (q);
8699 if (INSN_UID (p) >= max_uid_for_loop
8700 || GET_CODE (p) == NOTE)
8701 p = NEXT_INSN (p);
8702 if (INSN_UID (q) >= max_uid_for_loop)
8703 q = NEXT_INSN (q);
8707 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8708 the offset that we have to take into account due to auto-increment /
8709 giv derivation is zero. */
8710 static int
8711 biv_elimination_giv_has_0_offset (biv, giv, insn)
8712 struct induction *biv, *giv;
8713 rtx insn;
8715 /* If the giv V had the auto-inc address optimization applied
8716 to it, and INSN occurs between the giv insn and the biv
8717 insn, then we'd have to adjust the value used here.
8718 This is rare, so we don't bother to make this possible. */
8719 if (giv->auto_inc_opt
8720 && ((loop_insn_first_p (giv->insn, insn)
8721 && loop_insn_first_p (insn, biv->insn))
8722 || (loop_insn_first_p (biv->insn, insn)
8723 && loop_insn_first_p (insn, giv->insn))))
8724 return 0;
8726 return 1;
8729 /* If BL appears in X (part of the pattern of INSN), see if we can
8730 eliminate its use. If so, return 1. If not, return 0.
8732 If BIV does not appear in X, return 1.
8734 If ELIMINATE_P is nonzero, actually do the elimination.
8735 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8736 Depending on how many items have been moved out of the loop, it
8737 will either be before INSN (when WHERE_INSN is nonzero) or at the
8738 start of the loop (when WHERE_INSN is zero). */
8740 static int
8741 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8742 const struct loop *loop;
8743 rtx x, insn;
8744 struct iv_class *bl;
8745 int eliminate_p;
8746 basic_block where_bb;
8747 rtx where_insn;
8749 enum rtx_code code = GET_CODE (x);
8750 rtx reg = bl->biv->dest_reg;
8751 enum machine_mode mode = GET_MODE (reg);
8752 struct induction *v;
8753 rtx arg, tem;
8754 #ifdef HAVE_cc0
8755 rtx new;
8756 #endif
8757 int arg_operand;
8758 const char *fmt;
8759 int i, j;
8761 switch (code)
8763 case REG:
8764 /* If we haven't already been able to do something with this BIV,
8765 we can't eliminate it. */
8766 if (x == reg)
8767 return 0;
8768 return 1;
8770 case SET:
8771 /* If this sets the BIV, it is not a problem. */
8772 if (SET_DEST (x) == reg)
8773 return 1;
8775 /* If this is an insn that defines a giv, it is also ok because
8776 it will go away when the giv is reduced. */
8777 for (v = bl->giv; v; v = v->next_iv)
8778 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8779 return 1;
8781 #ifdef HAVE_cc0
8782 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8784 /* Can replace with any giv that was reduced and
8785 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8786 Require a constant for MULT_VAL, so we know it's nonzero.
8787 ??? We disable this optimization to avoid potential
8788 overflows. */
8790 for (v = bl->giv; v; v = v->next_iv)
8791 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8792 && v->add_val == const0_rtx
8793 && ! v->ignore && ! v->maybe_dead && v->always_computable
8794 && v->mode == mode
8795 && 0)
8797 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8798 continue;
8800 if (! eliminate_p)
8801 return 1;
8803 /* If the giv has the opposite direction of change,
8804 then reverse the comparison. */
8805 if (INTVAL (v->mult_val) < 0)
8806 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8807 const0_rtx, v->new_reg);
8808 else
8809 new = v->new_reg;
8811 /* We can probably test that giv's reduced reg. */
8812 if (validate_change (insn, &SET_SRC (x), new, 0))
8813 return 1;
8816 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8817 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8818 Require a constant for MULT_VAL, so we know it's nonzero.
8819 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8820 overflow problem. */
8822 for (v = bl->giv; v; v = v->next_iv)
8823 if (GET_CODE (v->mult_val) == CONST_INT
8824 && v->mult_val != const0_rtx
8825 && ! v->ignore && ! v->maybe_dead && v->always_computable
8826 && v->mode == mode
8827 && (GET_CODE (v->add_val) == SYMBOL_REF
8828 || GET_CODE (v->add_val) == LABEL_REF
8829 || GET_CODE (v->add_val) == CONST
8830 || (GET_CODE (v->add_val) == REG
8831 && REG_POINTER (v->add_val))))
8833 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8834 continue;
8836 if (! eliminate_p)
8837 return 1;
8839 /* If the giv has the opposite direction of change,
8840 then reverse the comparison. */
8841 if (INTVAL (v->mult_val) < 0)
8842 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8843 v->new_reg);
8844 else
8845 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8846 copy_rtx (v->add_val));
8848 /* Replace biv with the giv's reduced register. */
8849 update_reg_last_use (v->add_val, insn);
8850 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8851 return 1;
8853 /* Insn doesn't support that constant or invariant. Copy it
8854 into a register (it will be a loop invariant). */
8855 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8857 loop_insn_emit_before (loop, 0, where_insn,
8858 gen_move_insn (tem,
8859 copy_rtx (v->add_val)));
8861 /* Substitute the new register for its invariant value in
8862 the compare expression. */
8863 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8864 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8865 return 1;
8868 #endif
8869 break;
8871 case COMPARE:
8872 case EQ: case NE:
8873 case GT: case GE: case GTU: case GEU:
8874 case LT: case LE: case LTU: case LEU:
8875 /* See if either argument is the biv. */
8876 if (XEXP (x, 0) == reg)
8877 arg = XEXP (x, 1), arg_operand = 1;
8878 else if (XEXP (x, 1) == reg)
8879 arg = XEXP (x, 0), arg_operand = 0;
8880 else
8881 break;
8883 if (CONSTANT_P (arg))
8885 /* First try to replace with any giv that has constant positive
8886 mult_val and constant add_val. We might be able to support
8887 negative mult_val, but it seems complex to do it in general. */
8889 for (v = bl->giv; v; v = v->next_iv)
8890 if (GET_CODE (v->mult_val) == CONST_INT
8891 && INTVAL (v->mult_val) > 0
8892 && (GET_CODE (v->add_val) == SYMBOL_REF
8893 || GET_CODE (v->add_val) == LABEL_REF
8894 || GET_CODE (v->add_val) == CONST
8895 || (GET_CODE (v->add_val) == REG
8896 && REG_POINTER (v->add_val)))
8897 && ! v->ignore && ! v->maybe_dead && v->always_computable
8898 && v->mode == mode)
8900 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8901 continue;
8903 /* Don't eliminate if the linear combination that makes up
8904 the giv overflows when it is applied to ARG. */
8905 if (GET_CODE (arg) == CONST_INT)
8907 rtx add_val;
8909 if (GET_CODE (v->add_val) == CONST_INT)
8910 add_val = v->add_val;
8911 else
8912 add_val = const0_rtx;
8914 if (const_mult_add_overflow_p (arg, v->mult_val,
8915 add_val, mode, 1))
8916 continue;
8919 if (! eliminate_p)
8920 return 1;
8922 /* Replace biv with the giv's reduced reg. */
8923 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8925 /* If all constants are actually constant integers and
8926 the derived constant can be directly placed in the COMPARE,
8927 do so. */
8928 if (GET_CODE (arg) == CONST_INT
8929 && GET_CODE (v->add_val) == CONST_INT)
8931 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8932 v->add_val, mode, 1);
8934 else
8936 /* Otherwise, load it into a register. */
8937 tem = gen_reg_rtx (mode);
8938 loop_iv_add_mult_emit_before (loop, arg,
8939 v->mult_val, v->add_val,
8940 tem, where_bb, where_insn);
8943 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8945 if (apply_change_group ())
8946 return 1;
8949 /* Look for giv with positive constant mult_val and nonconst add_val.
8950 Insert insns to calculate new compare value.
8951 ??? Turn this off due to possible overflow. */
8953 for (v = bl->giv; v; v = v->next_iv)
8954 if (GET_CODE (v->mult_val) == CONST_INT
8955 && INTVAL (v->mult_val) > 0
8956 && ! v->ignore && ! v->maybe_dead && v->always_computable
8957 && v->mode == mode
8958 && 0)
8960 rtx tem;
8962 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8963 continue;
8965 if (! eliminate_p)
8966 return 1;
8968 tem = gen_reg_rtx (mode);
8970 /* Replace biv with giv's reduced register. */
8971 validate_change (insn, &XEXP (x, 1 - arg_operand),
8972 v->new_reg, 1);
8974 /* Compute value to compare against. */
8975 loop_iv_add_mult_emit_before (loop, arg,
8976 v->mult_val, v->add_val,
8977 tem, where_bb, where_insn);
8978 /* Use it in this insn. */
8979 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8980 if (apply_change_group ())
8981 return 1;
8984 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8986 if (loop_invariant_p (loop, arg) == 1)
8988 /* Look for giv with constant positive mult_val and nonconst
8989 add_val. Insert insns to compute new compare value.
8990 ??? Turn this off due to possible overflow. */
8992 for (v = bl->giv; v; v = v->next_iv)
8993 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8994 && ! v->ignore && ! v->maybe_dead && v->always_computable
8995 && v->mode == mode
8996 && 0)
8998 rtx tem;
9000 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9001 continue;
9003 if (! eliminate_p)
9004 return 1;
9006 tem = gen_reg_rtx (mode);
9008 /* Replace biv with giv's reduced register. */
9009 validate_change (insn, &XEXP (x, 1 - arg_operand),
9010 v->new_reg, 1);
9012 /* Compute value to compare against. */
9013 loop_iv_add_mult_emit_before (loop, arg,
9014 v->mult_val, v->add_val,
9015 tem, where_bb, where_insn);
9016 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
9017 if (apply_change_group ())
9018 return 1;
9022 /* This code has problems. Basically, you can't know when
9023 seeing if we will eliminate BL, whether a particular giv
9024 of ARG will be reduced. If it isn't going to be reduced,
9025 we can't eliminate BL. We can try forcing it to be reduced,
9026 but that can generate poor code.
9028 The problem is that the benefit of reducing TV, below should
9029 be increased if BL can actually be eliminated, but this means
9030 we might have to do a topological sort of the order in which
9031 we try to process biv. It doesn't seem worthwhile to do
9032 this sort of thing now. */
9034 #if 0
9035 /* Otherwise the reg compared with had better be a biv. */
9036 if (GET_CODE (arg) != REG
9037 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9038 return 0;
9040 /* Look for a pair of givs, one for each biv,
9041 with identical coefficients. */
9042 for (v = bl->giv; v; v = v->next_iv)
9044 struct induction *tv;
9046 if (v->ignore || v->maybe_dead || v->mode != mode)
9047 continue;
9049 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9050 tv = tv->next_iv)
9051 if (! tv->ignore && ! tv->maybe_dead
9052 && rtx_equal_p (tv->mult_val, v->mult_val)
9053 && rtx_equal_p (tv->add_val, v->add_val)
9054 && tv->mode == mode)
9056 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9057 continue;
9059 if (! eliminate_p)
9060 return 1;
9062 /* Replace biv with its giv's reduced reg. */
9063 XEXP (x, 1 - arg_operand) = v->new_reg;
9064 /* Replace other operand with the other giv's
9065 reduced reg. */
9066 XEXP (x, arg_operand) = tv->new_reg;
9067 return 1;
9070 #endif
9073 /* If we get here, the biv can't be eliminated. */
9074 return 0;
9076 case MEM:
9077 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9078 biv is used in it, since it will be replaced. */
9079 for (v = bl->giv; v; v = v->next_iv)
9080 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9081 return 1;
9082 break;
9084 default:
9085 break;
9088 /* See if any subexpression fails elimination. */
9089 fmt = GET_RTX_FORMAT (code);
9090 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9092 switch (fmt[i])
9094 case 'e':
9095 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9096 eliminate_p, where_bb, where_insn))
9097 return 0;
9098 break;
9100 case 'E':
9101 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9102 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9103 eliminate_p, where_bb, where_insn))
9104 return 0;
9105 break;
9109 return 1;
9112 /* Return nonzero if the last use of REG
9113 is in an insn following INSN in the same basic block. */
9115 static int
9116 last_use_this_basic_block (reg, insn)
9117 rtx reg;
9118 rtx insn;
9120 rtx n;
9121 for (n = insn;
9122 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9123 n = NEXT_INSN (n))
9125 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9126 return 1;
9128 return 0;
9131 /* Called via `note_stores' to record the initial value of a biv. Here we
9132 just record the location of the set and process it later. */
9134 static void
9135 record_initial (dest, set, data)
9136 rtx dest;
9137 rtx set;
9138 void *data;
9140 struct loop_ivs *ivs = (struct loop_ivs *) data;
9141 struct iv_class *bl;
9143 if (GET_CODE (dest) != REG
9144 || REGNO (dest) >= ivs->n_regs
9145 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9146 return;
9148 bl = REG_IV_CLASS (ivs, REGNO (dest));
9150 /* If this is the first set found, record it. */
9151 if (bl->init_insn == 0)
9153 bl->init_insn = note_insn;
9154 bl->init_set = set;
9158 /* If any of the registers in X are "old" and currently have a last use earlier
9159 than INSN, update them to have a last use of INSN. Their actual last use
9160 will be the previous insn but it will not have a valid uid_luid so we can't
9161 use it. X must be a source expression only. */
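/* For example, if X is (plus:SI (reg 60) (reg 61)), the recursion below
   advances the recorded last use of both registers to INSN.  */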
9163 static void
9164 update_reg_last_use (x, insn)
9165 rtx x;
9166 rtx insn;
9168 /* Check for the case where INSN does not have a valid luid. In this case,
9169 there is no need to modify the regno_last_uid, as this can only happen
9170 when code is inserted after the loop_end to set a pseudo's final value,
9171 and hence this insn will never be the last use of x.
9172 ???? This comment is not correct. See for example loop_givs_reduce.
9173 This may insert an insn before another new insn. */
9174 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9175 && INSN_UID (insn) < max_uid_for_loop
9176 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9178 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9180 else
9182 int i, j;
9183 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9184 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9186 if (fmt[i] == 'e')
9187 update_reg_last_use (XEXP (x, i), insn);
9188 else if (fmt[i] == 'E')
9189 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9190 update_reg_last_use (XVECEXP (x, i, j), insn);
9195 /* Given an insn INSN and condition COND, return the condition in a
9196 canonical form to simplify testing by callers. Specifically:
9198 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9199 (2) Both operands will be machine operands; (cc0) will have been replaced.
9200 (3) If an operand is a constant, it will be the second operand.
9201 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9202 for GE, GEU, and LEU.
9204 If the condition cannot be understood, or is an inequality floating-point
9205 comparison which needs to be reversed, 0 will be returned.
9207 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9209 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9210 insn used in locating the condition was found. If a replacement test
9211 of the condition is desired, it should be placed in front of that
9212 insn and we will be sure that the inputs are still valid.
9214 If WANT_REG is nonzero, we wish the condition to be relative to that
9215 register, if possible. Therefore, do not canonicalize the condition
9216 further. */
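/* For example, by rules (3) and (4) above, (le (reg:SI x) (const_int 4))
   canonicalizes to (lt (reg:SI x) (const_int 5)), and a constant first
   operand as in (gt (const_int 0) (reg:SI x)) is swapped to give
   (lt (reg:SI x) (const_int 0)).  */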
9218 rtx
9219 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9220 rtx insn;
9221 rtx cond;
9222 int reverse;
9223 rtx *earliest;
9224 rtx want_reg;
9226 enum rtx_code code;
9227 rtx prev = insn;
9228 rtx set;
9229 rtx tem;
9230 rtx op0, op1;
9231 int reverse_code = 0;
9232 enum machine_mode mode;
9234 code = GET_CODE (cond);
9235 mode = GET_MODE (cond);
9236 op0 = XEXP (cond, 0);
9237 op1 = XEXP (cond, 1);
9239 if (reverse)
9240 code = reversed_comparison_code (cond, insn);
9241 if (code == UNKNOWN)
9242 return 0;
9244 if (earliest)
9245 *earliest = insn;
9247 /* If we are comparing a register with zero, see if the register is set
9248 in the previous insn to a COMPARE or a comparison operation. Perform
9249 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9250 in cse.c */
9252 while (GET_RTX_CLASS (code) == '<'
9253 && op1 == CONST0_RTX (GET_MODE (op0))
9254 && op0 != want_reg)
9256 /* Set nonzero when we find something of interest. */
9257 rtx x = 0;
9259 #ifdef HAVE_cc0
9260 /* If comparison with cc0, import actual comparison from compare
9261 insn. */
9262 if (op0 == cc0_rtx)
9264 if ((prev = prev_nonnote_insn (prev)) == 0
9265 || GET_CODE (prev) != INSN
9266 || (set = single_set (prev)) == 0
9267 || SET_DEST (set) != cc0_rtx)
9268 return 0;
9270 op0 = SET_SRC (set);
9271 op1 = CONST0_RTX (GET_MODE (op0));
9272 if (earliest)
9273 *earliest = prev;
9275 #endif
9277 /* If this is a COMPARE, pick up the two things being compared. */
9278 if (GET_CODE (op0) == COMPARE)
9280 op1 = XEXP (op0, 1);
9281 op0 = XEXP (op0, 0);
9282 continue;
9284 else if (GET_CODE (op0) != REG)
9285 break;
9287 /* Go back to the previous insn. Stop if it is not an INSN. We also
9288 stop if it isn't a single set or if it has a REG_INC note because
9289 we don't want to bother dealing with it. */
9291 if ((prev = prev_nonnote_insn (prev)) == 0
9292 || GET_CODE (prev) != INSN
9293 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9294 break;
9296 set = set_of (op0, prev);
9298 if (set
9299 && (GET_CODE (set) != SET
9300 || !rtx_equal_p (SET_DEST (set), op0)))
9301 break;
9303 /* If this is setting OP0, get what it sets it to if it looks
9304 relevant. */
9305 if (set)
9307 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9308 #ifdef FLOAT_STORE_FLAG_VALUE
9309 REAL_VALUE_TYPE fsfv;
9310 #endif
9312 /* ??? We may not combine comparisons done in a CCmode with
9313 comparisons not done in a CCmode. This is to aid targets
9314 like Alpha that have an IEEE compliant EQ instruction, and
9315 a non-IEEE compliant BEQ instruction. The use of CCmode is
9316 actually artificial, simply to prevent the combination, but
9317 should not affect other platforms.
9319 However, we must allow VOIDmode comparisons to match either
9320 CCmode or non-CCmode comparisons, because some ports have
9321 modeless comparisons inside branch patterns.
9323 ??? This mode check should perhaps look more like the mode check
9324 in simplify_comparison in combine. */
9326 if ((GET_CODE (SET_SRC (set)) == COMPARE
9327 || (((code == NE
9328 || (code == LT
9329 && GET_MODE_CLASS (inner_mode) == MODE_INT
9330 && (GET_MODE_BITSIZE (inner_mode)
9331 <= HOST_BITS_PER_WIDE_INT)
9332 && (STORE_FLAG_VALUE
9333 & ((HOST_WIDE_INT) 1
9334 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9335 #ifdef FLOAT_STORE_FLAG_VALUE
9336 || (code == LT
9337 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9338 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9339 REAL_VALUE_NEGATIVE (fsfv)))
9340 #endif
9342 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9343 && (((GET_MODE_CLASS (mode) == MODE_CC)
9344 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9345 || mode == VOIDmode || inner_mode == VOIDmode))
9346 x = SET_SRC (set);
9347 else if (((code == EQ
9348 || (code == GE
9349 && (GET_MODE_BITSIZE (inner_mode)
9350 <= HOST_BITS_PER_WIDE_INT)
9351 && GET_MODE_CLASS (inner_mode) == MODE_INT
9352 && (STORE_FLAG_VALUE
9353 & ((HOST_WIDE_INT) 1
9354 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9355 #ifdef FLOAT_STORE_FLAG_VALUE
9356 || (code == GE
9357 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9358 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9359 REAL_VALUE_NEGATIVE (fsfv)))
9360 #endif
9362 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9363 && (((GET_MODE_CLASS (mode) == MODE_CC)
9364 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9365 || mode == VOIDmode || inner_mode == VOIDmode))
9368 reverse_code = 1;
9369 x = SET_SRC (set);
9371 else
9372 break;
9375 else if (reg_set_p (op0, prev))
9376 /* If this sets OP0, but not directly, we have to give up. */
9377 break;
9379 if (x)
9381 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9382 code = GET_CODE (x);
9383 if (reverse_code)
9385 code = reversed_comparison_code (x, prev);
9386 if (code == UNKNOWN)
9387 return 0;
9388 reverse_code = 0;
9391 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9392 if (earliest)
9393 *earliest = prev;
9397 /* If constant is first, put it last. */
9398 if (CONSTANT_P (op0))
9399 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9401 /* If OP0 is the result of a comparison, we weren't able to find what
9402 was really being compared, so fail. */
9403 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9404 return 0;
9406 /* Canonicalize any ordered comparison with integers involving equality
9407 if we can do computations in the relevant mode and we do not
9408 overflow. */
9410 if (GET_CODE (op1) == CONST_INT
9411 && GET_MODE (op0) != VOIDmode
9412 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9414 HOST_WIDE_INT const_val = INTVAL (op1);
9415 unsigned HOST_WIDE_INT uconst_val = const_val;
9416 unsigned HOST_WIDE_INT max_val
9417 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9419 switch (code)
9421 case LE:
9422 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9423 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9424 break;
9426 /* When cross-compiling, const_val might be sign-extended from
9427 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
9428 case GE:
9429 if ((HOST_WIDE_INT) (const_val & max_val)
9430 != (((HOST_WIDE_INT) 1
9431 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9432 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9433 break;
9435 case LEU:
9436 if (uconst_val < max_val)
9437 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9438 break;
9440 case GEU:
9441 if (uconst_val != 0)
9442 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9443 break;
9445 default:
9446 break;
9450 #ifdef HAVE_cc0
9451 /* Never return CC0; return zero instead. */
9452 if (op0 == cc0_rtx)
9453 return 0;
9454 #endif
9456 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9459 /* Given a jump insn JUMP, return the condition that will cause it to branch
9460 to its JUMP_LABEL. If the condition cannot be understood, or is an
9461 inequality floating-point comparison which needs to be reversed, 0 will
9462 be returned.
9464 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9465 insn used in locating the condition was found. If a replacement test
9466 of the condition is desired, it should be placed in front of that
9467 insn and we will be sure that the inputs are still valid. */
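/* An illustrative sketch of a typical call, where JUMP is known to be a
   conditional jump insn:

     rtx earliest;
     rtx cond = get_condition (jump, &earliest);

   If COND is nonzero, GET_CODE (cond) is a canonical comparison code,
   and a replacement test of the condition may be emitted in front of
   EARLIEST.  */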
9469 rtx
9470 get_condition (jump, earliest)
9471 rtx jump;
9472 rtx *earliest;
9474 rtx cond;
9475 int reverse;
9476 rtx set;
9478 /* If this is not a standard conditional jump, we can't parse it. */
9479 if (GET_CODE (jump) != JUMP_INSN
9480 || ! any_condjump_p (jump))
9481 return 0;
9482 set = pc_set (jump);
9484 cond = XEXP (SET_SRC (set), 0);
9486 /* If this branches to JUMP_LABEL when the condition is false, reverse
9487 the condition. */
9488 reverse
9489 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9490 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9492 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9495 /* Similar to above routine, except that we also put an invariant last
9496 unless both operands are invariants. */
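/* For example (illustrative only), if INV is loop-invariant and VAR is
   not, (gt INV VAR) is returned as (lt VAR INV), whereas (gt VAR INV)
   already has the invariant last and is returned unchanged.  */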
9498 rtx
9499 get_condition_for_loop (loop, x)
9500 const struct loop *loop;
9501 rtx x;
9503 rtx comparison = get_condition (x, (rtx*) 0);
9505 if (comparison == 0
9506 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9507 || loop_invariant_p (loop, XEXP (comparison, 1)))
9508 return comparison;
9510 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9511 XEXP (comparison, 1), XEXP (comparison, 0));
9514 /* Scan the function and determine whether it has indirect (computed) jumps.
9516 This is taken mostly from flow.c; similar code exists elsewhere
9517 in the compiler. It may be useful to put this into rtlanal.c. */
9518 static int
9519 indirect_jump_in_function_p (start)
9520 rtx start;
9522 rtx insn;
9524 for (insn = start; insn; insn = NEXT_INSN (insn))
9525 if (computed_jump_p (insn))
9526 return 1;
9528 return 0;
9531 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9532 documentation for LOOP_MEMS for the definition of `appropriate'.
9533 This function is called from prescan_loop via for_each_rtx. */
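/* As a reminder of the for_each_rtx callback protocol used here:
   returning 0 continues the traversal into subexpressions, while
   returning -1 skips the subexpressions of the current rtx.  The -1
   returns below prune CLOBBERs, CONST_DOUBLEs and EXPR_LISTs, whose
   contained MEMs are of no interest.  */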
9535 static int
9536 insert_loop_mem (mem, data)
9537 rtx *mem;
9538 void *data ATTRIBUTE_UNUSED;
9540 struct loop_info *loop_info = data;
9541 int i;
9542 rtx m = *mem;
9544 if (m == NULL_RTX)
9545 return 0;
9547 switch (GET_CODE (m))
9549 case MEM:
9550 break;
9552 case CLOBBER:
9553 /* We're not interested in MEMs that are only clobbered. */
9554 return -1;
9556 case CONST_DOUBLE:
9557 /* We're not interested in the MEM associated with a
9558 CONST_DOUBLE, so there's no need to traverse into this. */
9559 return -1;
9561 case EXPR_LIST:
9562 /* We're not interested in any MEMs that only appear in notes. */
9563 return -1;
9565 default:
9566 /* This is not a MEM. */
9567 return 0;
9570 /* See if we've already seen this MEM. */
9571 for (i = 0; i < loop_info->mems_idx; ++i)
9572 if (rtx_equal_p (m, loop_info->mems[i].mem))
9574 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9575 /* The modes of the two memory accesses are different. If
9576 this happens, something tricky is going on, and we just
9577 don't optimize accesses to this MEM. */
9578 loop_info->mems[i].optimize = 0;
9580 return 0;
9583 /* Resize the array, if necessary. */
9584 if (loop_info->mems_idx == loop_info->mems_allocated)
9586 if (loop_info->mems_allocated != 0)
9587 loop_info->mems_allocated *= 2;
9588 else
9589 loop_info->mems_allocated = 32;
9591 loop_info->mems = (loop_mem_info *)
9592 xrealloc (loop_info->mems,
9593 loop_info->mems_allocated * sizeof (loop_mem_info));
9596 /* Actually insert the MEM. */
9597 loop_info->mems[loop_info->mems_idx].mem = m;
9598 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9599 because we can't put it in a register. We still store it in the
9600 table, though, so that if we see the same address later, but in a
9601 non-BLK mode, we'll not think we can optimize it at that point. */
9602 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9603 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9604 ++loop_info->mems_idx;
9606 return 0;
9610 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9612 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9613 register that is modified by an insn between FROM and TO. If the
9614 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9615 more, stop incrementing it, to avoid overflow.
9617 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9618 register I is used, if it is only used once. Otherwise, it is set
9619 to 0 (for no uses) or const0_rtx for more than one use. This
9620 parameter may be zero, in which case this processing is not done.
9622 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9623 optimize register I. */
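/* After scanning, REGS->ARRAY[I].SINGLE_USAGE is thus a three-valued
   encoding:

     0           register I is never used in the loop;
     const0_rtx  register I is used more than once;
     <insn>      the unique insn in which register I is used.  */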
9625 static void
9626 loop_regs_scan (loop, extra_size)
9627 const struct loop *loop;
9628 int extra_size;
9630 struct loop_regs *regs = LOOP_REGS (loop);
9631 int old_nregs;
9632 /* last_set[n] is nonzero iff reg n has been set in the current
9633 basic block. In that case, it is the insn that last set reg n. */
9634 rtx *last_set;
9635 rtx insn;
9636 int i;
9638 old_nregs = regs->num;
9639 regs->num = max_reg_num ();
9641 /* Grow the regs array if not allocated or too small. */
9642 if (regs->num >= regs->size)
9644 regs->size = regs->num + extra_size;
9646 regs->array = (struct loop_reg *)
9647 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9649 /* Zero the new elements. */
9650 memset (regs->array + old_nregs, 0,
9651 (regs->size - old_nregs) * sizeof (*regs->array));
9654 /* Clear previously scanned fields but do not clear n_times_set. */
9655 for (i = 0; i < old_nregs; i++)
9657 regs->array[i].set_in_loop = 0;
9658 regs->array[i].may_not_optimize = 0;
9659 regs->array[i].single_usage = NULL_RTX;
9662 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9664 /* Scan the loop, recording register usage. */
9665 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9666 insn = NEXT_INSN (insn))
9668 if (INSN_P (insn))
9670 /* Record registers that have exactly one use. */
9671 find_single_use_in_loop (regs, insn, PATTERN (insn));
9673 /* Include uses in REG_EQUAL notes. */
9674 if (REG_NOTES (insn))
9675 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9677 if (GET_CODE (PATTERN (insn)) == SET
9678 || GET_CODE (PATTERN (insn)) == CLOBBER)
9679 count_one_set (regs, insn, PATTERN (insn), last_set);
9680 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9682 int i;
9683 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9684 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9685 last_set);
9689 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9690 memset (last_set, 0, regs->num * sizeof (rtx));
9692 /* Invalidate all registers used for function argument passing.
9693 We check rtx_varies_p for the same reason as below, to allow
9694 optimizing PIC calculations. */
9695 if (GET_CODE (insn) == CALL_INSN)
9697 rtx link;
9698 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9699 link;
9700 link = XEXP (link, 1))
9702 rtx op, reg;
9704 if (GET_CODE (op = XEXP (link, 0)) == USE
9705 && GET_CODE (reg = XEXP (op, 0)) == REG
9706 && rtx_varies_p (reg, 1))
9707 regs->array[REGNO (reg)].may_not_optimize = 1;
9712 /* Invalidate all hard registers clobbered by calls. With one exception:
9713 a call-clobbered PIC register is still function-invariant for our
9714 purposes, since we can hoist any PIC calculations out of the loop.
9715 Thus the call to rtx_varies_p. */
9716 if (LOOP_INFO (loop)->has_call)
9717 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9718 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9719 && rtx_varies_p (regno_reg_rtx[i], 1))
9721 regs->array[i].may_not_optimize = 1;
9722 regs->array[i].set_in_loop = 1;
9725 #ifdef AVOID_CCMODE_COPIES
9726 /* Don't try to move insns which set CC registers if we should not
9727 create CCmode register copies. */
9728 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9729 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9730 regs->array[i].may_not_optimize = 1;
9731 #endif
9733 /* Set regs->array[I].n_times_set for the new registers. */
9734 for (i = old_nregs; i < regs->num; i++)
9735 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9737 free (last_set);
9740 /* Returns the number of real INSNs in the LOOP. */
9742 static int
9743 count_insns_in_loop (loop)
9744 const struct loop *loop;
9746 int count = 0;
9747 rtx insn;
9749 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9750 insn = NEXT_INSN (insn))
9751 if (INSN_P (insn))
9752 ++count;
9754 return count;
9757 /* Move MEMs into registers for the duration of the loop. */
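/* Schematically (illustrative only), a hoistable MEM that is both read
   and written inside the loop is transformed from

       loop:  ... MEM ...

   into

       REG = MEM;              (in the loop pre-header)
       loop:  ... REG ...
       MEM = REG;              (after the loop)

   where REG is a fresh pseudo register; a read-only MEM gets just the
   pre-header load.  */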
9759 static void
9760 load_mems (loop)
9761 const struct loop *loop;
9763 struct loop_info *loop_info = LOOP_INFO (loop);
9764 struct loop_regs *regs = LOOP_REGS (loop);
9765 int maybe_never = 0;
9766 int i;
9767 rtx p, prev_ebb_head;
9768 rtx label = NULL_RTX;
9769 rtx end_label;
9770 /* Nonzero if the next instruction may never be executed. */
9771 int next_maybe_never = 0;
9772 unsigned int last_max_reg = max_reg_num ();
9774 if (loop_info->mems_idx == 0)
9775 return;
9777 /* We cannot use next_label here because it skips over normal insns. */
9778 end_label = next_nonnote_insn (loop->end);
9779 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9780 end_label = NULL_RTX;
9782 /* Check to see if it's possible that some instructions in the loop are
9783 never executed. Also check if there is a goto out of the loop other
9784 than right after the end of the loop. */
9785 for (p = next_insn_in_loop (loop, loop->scan_start);
9786 p != NULL_RTX;
9787 p = next_insn_in_loop (loop, p))
9789 if (GET_CODE (p) == CODE_LABEL)
9790 maybe_never = 1;
9791 else if (GET_CODE (p) == JUMP_INSN
9792 /* If we enter the loop in the middle, and scan
9793 around to the beginning, don't set maybe_never
9794 for that. This must be an unconditional jump,
9795 otherwise the code at the top of the loop might
9796 never be executed. Unconditional jumps are
9797 followed by a barrier and then the loop end.  */
9798 && ! (GET_CODE (p) == JUMP_INSN
9799 && JUMP_LABEL (p) == loop->top
9800 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9801 && any_uncondjump_p (p)))
9803 /* If this is a jump outside of the loop but not right
9804 after the end of the loop, we would have to emit new fixup
9805 sequences for each such label. */
9806 if (/* If we can't tell where control might go when this
9807 JUMP_INSN is executed, we must be conservative. */
9808 !JUMP_LABEL (p)
9809 || (JUMP_LABEL (p) != end_label
9810 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9811 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9812 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9813 return;
9815 if (!any_condjump_p (p))
9816 /* Something complicated. */
9817 maybe_never = 1;
9818 else
9819 /* If there are any more instructions in the loop, they
9820 might not be reached. */
9821 next_maybe_never = 1;
9823 else if (next_maybe_never)
9824 maybe_never = 1;
9827 /* Find start of the extended basic block that enters the loop. */
9828 for (p = loop->start;
9829 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9830 p = PREV_INSN (p))
9831 ;
9832 prev_ebb_head = p;
9834 cselib_init ();
9836 /* Build table of mems that get set to constant values before the
9837 loop. */
9838 for (; p != loop->start; p = NEXT_INSN (p))
9839 cselib_process_insn (p);
9841 /* Actually move the MEMs. */
9842 for (i = 0; i < loop_info->mems_idx; ++i)
9844 regset_head load_copies;
9845 regset_head store_copies;
9846 int written = 0;
9847 rtx reg;
9848 rtx mem = loop_info->mems[i].mem;
9849 rtx mem_list_entry;
9851 if (MEM_VOLATILE_P (mem)
9852 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9853 /* There's no telling whether or not MEM is modified. */
9854 loop_info->mems[i].optimize = 0;
9856 /* Go through the MEMs written to in the loop to see if this
9857 one is aliased by one of them. */
9858 mem_list_entry = loop_info->store_mems;
9859 while (mem_list_entry)
9861 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9862 written = 1;
9863 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9864 mem, rtx_varies_p))
9866 /* MEM is indeed aliased by this store. */
9867 loop_info->mems[i].optimize = 0;
9868 break;
9870 mem_list_entry = XEXP (mem_list_entry, 1);
9873 if (flag_float_store && written
9874 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9875 loop_info->mems[i].optimize = 0;
9877 /* If this MEM is written to, we must be sure that there
9878 are no reads from another MEM that aliases this one. */
9879 if (loop_info->mems[i].optimize && written)
9881 int j;
9883 for (j = 0; j < loop_info->mems_idx; ++j)
9885 if (j == i)
9886 continue;
9887 else if (true_dependence (mem,
9888 VOIDmode,
9889 loop_info->mems[j].mem,
9890 rtx_varies_p))
9892 /* It's not safe to hoist loop_info->mems[i] out of
9893 the loop because writes to it might not be
9894 seen by reads from loop_info->mems[j]. */
9895 loop_info->mems[i].optimize = 0;
9896 break;
9901 if (maybe_never && may_trap_p (mem))
9902 /* We can't access the MEM outside the loop; it might
9903 cause a trap that wouldn't have happened otherwise. */
9904 loop_info->mems[i].optimize = 0;
9906 if (!loop_info->mems[i].optimize)
9907 /* We thought we were going to lift this MEM out of the
9908 loop, but later discovered that we could not. */
9909 continue;
9911 INIT_REG_SET (&load_copies);
9912 INIT_REG_SET (&store_copies);
9914 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9915 order to keep scan_loop from moving stores to this MEM
9916 out of the loop just because this REG is neither a
9917 user-variable nor used in the loop test. */
9918 reg = gen_reg_rtx (GET_MODE (mem));
9919 REG_USERVAR_P (reg) = 1;
9920 loop_info->mems[i].reg = reg;
9922 /* Now, replace all references to the MEM with the
9923 corresponding pseudos. */
9924 maybe_never = 0;
9925 for (p = next_insn_in_loop (loop, loop->scan_start);
9926 p != NULL_RTX;
9927 p = next_insn_in_loop (loop, p))
9929 if (INSN_P (p))
9931 rtx set;
9933 set = single_set (p);
9935 /* See if this copies the mem into a register that isn't
9936 modified afterwards. We'll try to do copy propagation
9937 a little further on. */
9938 if (set
9939 /* @@@ This test is _way_ too conservative. */
9940 && ! maybe_never
9941 && GET_CODE (SET_DEST (set)) == REG
9942 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9943 && REGNO (SET_DEST (set)) < last_max_reg
9944 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9945 && rtx_equal_p (SET_SRC (set), mem))
9946 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9948 /* See if this copies the mem from a register that isn't
9949 modified afterwards. We'll try to remove the
9950 redundant copy later on by doing a little register
9951 renaming and copy propagation. This will help
9952 to untangle things for the BIV detection code. */
9953 if (set
9954 && ! maybe_never
9955 && GET_CODE (SET_SRC (set)) == REG
9956 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9957 && REGNO (SET_SRC (set)) < last_max_reg
9958 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9959 && rtx_equal_p (SET_DEST (set), mem))
9960 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9962 /* If this is a call which uses / clobbers this memory
9963 location, we must not change the interface here. */
9964 if (GET_CODE (p) == CALL_INSN
9965 && reg_mentioned_p (loop_info->mems[i].mem,
9966 CALL_INSN_FUNCTION_USAGE (p)))
9968 cancel_changes (0);
9969 loop_info->mems[i].optimize = 0;
9970 break;
9972 else
9973 /* Replace the memory reference with the shadow register. */
9974 replace_loop_mems (p, loop_info->mems[i].mem,
9975 loop_info->mems[i].reg);
9978 if (GET_CODE (p) == CODE_LABEL
9979 || GET_CODE (p) == JUMP_INSN)
9980 maybe_never = 1;
9983 if (! loop_info->mems[i].optimize)
9984 ; /* We found we couldn't do the replacement, so do nothing. */
9985 else if (! apply_change_group ())
9986 /* We couldn't replace all occurrences of the MEM. */
9987 loop_info->mems[i].optimize = 0;
9988 else
9990 /* Load the memory immediately before LOOP->START, which is
9991 the NOTE_LOOP_BEG. */
9992 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9993 rtx set;
9994 rtx best = mem;
9995 int j;
9996 struct elt_loc_list *const_equiv = 0;
9998 if (e)
10000 struct elt_loc_list *equiv;
10001 struct elt_loc_list *best_equiv = 0;
10002 for (equiv = e->locs; equiv; equiv = equiv->next)
10004 if (CONSTANT_P (equiv->loc))
10005 const_equiv = equiv;
10006 else if (GET_CODE (equiv->loc) == REG
10007 /* Extending hard register lifetimes causes a crash
10008 on SRC targets.  Doing so on non-SRC targets is
10009 probably also not a good idea, since we most
10010 probably have a pseudo register equivalence as
10011 well.  */
10012 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10013 best_equiv = equiv;
10015 /* Use the constant equivalence if that is cheap enough. */
10016 if (! best_equiv)
10017 best_equiv = const_equiv;
10018 else if (const_equiv
10019 && (rtx_cost (const_equiv->loc, SET)
10020 <= rtx_cost (best_equiv->loc, SET)))
10022 best_equiv = const_equiv;
10023 const_equiv = 0;
10026 /* If best_equiv is nonzero, we know that MEM is set to a
10027 constant or register before the loop. We will use this
10028 knowledge to initialize the shadow register with that
10029 constant or reg rather than by loading from MEM. */
10030 if (best_equiv)
10031 best = copy_rtx (best_equiv->loc);
10034 set = gen_move_insn (reg, best);
10035 set = loop_insn_hoist (loop, set);
10036 if (REG_P (best))
10038 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10039 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10041 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10042 break;
10046 if (const_equiv)
10047 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10049 if (written)
10051 if (label == NULL_RTX)
10053 label = gen_label_rtx ();
10054 emit_label_after (label, loop->end);
10057 /* Store the memory immediately after END, which is
10058 the NOTE_LOOP_END. */
10059 set = gen_move_insn (copy_rtx (mem), reg);
10060 loop_insn_emit_after (loop, 0, label, set);
10063 if (loop_dump_stream)
10065 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10066 REGNO (reg), (written ? "r/w" : "r/o"));
10067 print_rtl (loop_dump_stream, mem);
10068 fputc ('\n', loop_dump_stream);
10071 /* Attempt a bit of copy propagation. This helps untangle the
10072 data flow, and enables {basic,general}_induction_var to find
10073 more bivs/givs. */
10074 EXECUTE_IF_SET_IN_REG_SET
10075 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10077 try_copy_prop (loop, reg, j);
10079 CLEAR_REG_SET (&load_copies);
10081 EXECUTE_IF_SET_IN_REG_SET
10082 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10084 try_swap_copy_prop (loop, reg, j);
10086 CLEAR_REG_SET (&store_copies);
10090 if (label != NULL_RTX && end_label != NULL_RTX)
10092 /* Now, we need to replace all references to the previous exit
10093 label with the new one. */
10094 rtx_pair rr;
10095 rr.r1 = end_label;
10096 rr.r2 = label;
10098 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10100 for_each_rtx (&p, replace_label, &rr);
10102 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10103 field. This is not handled by for_each_rtx because it doesn't
10104 handle unprinted ('0') fields. We need to update JUMP_LABEL
10105 because the immediately following unroll pass will use it.
10106 replace_label would not work anyway, because it only handles
10107 LABEL_REFs. */
10108 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10109 JUMP_LABEL (p) = label;
10113 cselib_finish ();
10116 /* For communication between note_reg_stored and its caller. */
10117 struct note_reg_stored_arg
10119 int set_seen;
10120 rtx reg;
10123 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10124 is equal to ARG. */
10125 static void
10126 note_reg_stored (x, setter, arg)
10127 rtx x, setter ATTRIBUTE_UNUSED;
10128 void *arg;
10130 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10131 if (t->reg == x)
10132 t->set_seen = 1;
10135 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10136 There must be exactly one insn that sets this pseudo; it will be
10137 deleted if all replacements succeed and we can prove that the register
10138 is not used after the loop. */
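/* Schematically (illustrative only):

       regno = <expr>;         <- the unique initializing insn
       ... regno ...           <- each use becomes REPLACEMENT

   If every use was replaced and REGNO is provably dead after the loop,
   the initializing insn (or the whole libcall sequence ending in it)
   is deleted as well.  */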
10140 static void
10141 try_copy_prop (loop, replacement, regno)
10142 const struct loop *loop;
10143 rtx replacement;
10144 unsigned int regno;
10146 /* This is the reg that we are copying from. */
10147 rtx reg_rtx = regno_reg_rtx[regno];
10148 rtx init_insn = 0;
10149 rtx insn;
10150 /* These help keep track of whether we replaced all uses of the reg. */
10151 int replaced_last = 0;
10152 int store_is_first = 0;
10154 for (insn = next_insn_in_loop (loop, loop->scan_start);
10155 insn != NULL_RTX;
10156 insn = next_insn_in_loop (loop, insn))
10158 rtx set;
10160 /* Only substitute within one extended basic block from the initializing
10161 insn. */
10162 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10163 break;
10165 if (! INSN_P (insn))
10166 continue;
10168 /* Is this the initializing insn? */
10169 set = single_set (insn);
10170 if (set
10171 && GET_CODE (SET_DEST (set)) == REG
10172 && REGNO (SET_DEST (set)) == regno)
10174 if (init_insn)
10175 abort ();
10177 init_insn = insn;
10178 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10179 store_is_first = 1;
10182 /* Only substitute after seeing the initializing insn. */
10183 if (init_insn && insn != init_insn)
10185 struct note_reg_stored_arg arg;
10187 replace_loop_regs (insn, reg_rtx, replacement);
10188 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10189 replaced_last = 1;
10191 /* Stop replacing when REPLACEMENT is modified. */
10192 arg.reg = replacement;
10193 arg.set_seen = 0;
10194 note_stores (PATTERN (insn), note_reg_stored, &arg);
10195 if (arg.set_seen)
10197 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10199 /* It is possible that we've turned a previously valid REG_EQUAL note
10200 into an invalid one: we changed REGNO into REPLACEMENT and, unlike
10201 REGNO, REPLACEMENT is modified here, so the note no longer holds.  */
10202 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10203 remove_note (insn, note);
10204 break;
10208 if (! init_insn)
10209 abort ();
10210 if (apply_change_group ())
10212 if (loop_dump_stream)
10213 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10214 if (store_is_first && replaced_last)
10216 rtx first;
10217 rtx retval_note;
10219 /* Assume we're just deleting INIT_INSN. */
10220 first = init_insn;
10221 /* Look for REG_RETVAL note. If we're deleting the end of
10222 the libcall sequence, the whole sequence can go. */
10223 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10224 /* If we found a REG_RETVAL note, find the first instruction
10225 in the sequence. */
10226 if (retval_note)
10227 first = XEXP (retval_note, 0);
10229 /* Delete the instructions. */
10230 loop_delete_insns (first, init_insn);
10232 if (loop_dump_stream)
10233 fprintf (loop_dump_stream, ".\n");
10237 /* Replace all the instructions from FIRST up to and including LAST
10238 with NOTE_INSN_DELETED notes. */
10240 static void
10241 loop_delete_insns (first, last)
10242 rtx first;
10243 rtx last;
10245 while (1)
10247 if (loop_dump_stream)
10248 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10249 INSN_UID (first));
10250 delete_insn (first);
10252 /* If this was the LAST instruction we're supposed to delete,
10253 we're done. */
10254 if (first == last)
10255 break;
10257 first = NEXT_INSN (first);
10261 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10262 loop LOOP if the order of the sets of these registers can be
10263 swapped. There must be exactly one insn within the loop that sets
10264 this pseudo followed immediately by a move insn that sets
10265 REPLACEMENT with REGNO. */
10266 static void
10267 try_swap_copy_prop (loop, replacement, regno)
10268 const struct loop *loop;
10269 rtx replacement;
10270 unsigned int regno;
10272 rtx insn;
10273 rtx set = NULL_RTX;
10274 unsigned int new_regno;
10276 new_regno = REGNO (replacement);
10278 for (insn = next_insn_in_loop (loop, loop->scan_start);
10279 insn != NULL_RTX;
10280 insn = next_insn_in_loop (loop, insn))
10282 /* Search for the insn that copies REGNO to NEW_REGNO.  */
10283 if (INSN_P (insn)
10284 && (set = single_set (insn))
10285 && GET_CODE (SET_DEST (set)) == REG
10286 && REGNO (SET_DEST (set)) == new_regno
10287 && GET_CODE (SET_SRC (set)) == REG
10288 && REGNO (SET_SRC (set)) == regno)
10289 break;
10292 if (insn != NULL_RTX)
10294 rtx prev_insn;
10295 rtx prev_set;
10297 /* Some DEF-USE info would come in handy here to make this
10298 function more general. For now, just check the previous insn
10299 which is the most likely candidate for setting REGNO. */
10301 prev_insn = PREV_INSN (insn);
10303 if (INSN_P (prev_insn)
10304 && (prev_set = single_set (prev_insn))
10305 && GET_CODE (SET_DEST (prev_set)) == REG
10306 && REGNO (SET_DEST (prev_set)) == regno)
10308 /* We have:
10309 (set (reg regno) (expr))
10310 (set (reg new_regno) (reg regno))
10312 so try converting this to:
10313 (set (reg new_regno) (expr))
10314 (set (reg regno) (reg new_regno))
10316 The former construct is often generated when a global
10317 variable used for an induction variable is shadowed by a
10318 register (NEW_REGNO). The latter construct improves the
10319 chances of GIV replacement and BIV elimination. */
10321 validate_change (prev_insn, &SET_DEST (prev_set),
10322 replacement, 1);
10323 validate_change (insn, &SET_DEST (set),
10324 SET_SRC (set), 1);
10325 validate_change (insn, &SET_SRC (set),
10326 replacement, 1);
10328 if (apply_change_group ())
10330 if (loop_dump_stream)
10331 fprintf (loop_dump_stream,
10332 " Swapped set of reg %d at %d with reg %d at %d.\n",
10333 regno, INSN_UID (insn),
10334 new_regno, INSN_UID (prev_insn));
10336 /* Update first use of REGNO. */
10337 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10338 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10340 /* Now perform copy propagation to hopefully
10341 remove all uses of REGNO within the loop. */
10342 try_copy_prop (loop, replacement, regno);
10348 /* Replace MEM with its associated pseudo register. This function is
10349 called from load_mems via for_each_rtx. DATA is actually a pointer
10350 to a structure describing the instruction currently being scanned
10351 and the MEM we are currently replacing. */
10353 static int
10354 replace_loop_mem (mem, data)
10355 rtx *mem;
10356 void *data;
10358 loop_replace_args *args = (loop_replace_args *) data;
10359 rtx m = *mem;
10361 if (m == NULL_RTX)
10362 return 0;
10364 switch (GET_CODE (m))
10366 case MEM:
10367 break;
10369 case CONST_DOUBLE:
10370 /* We're not interested in the MEM associated with a
10371 CONST_DOUBLE, so there's no need to traverse into one. */
10372 return -1;
10374 default:
10375 /* This is not a MEM. */
10376 return 0;
10379 if (!rtx_equal_p (args->match, m))
10380 /* This is not the MEM we are currently replacing. */
10381 return 0;
10383 /* Actually replace the MEM. */
10384 validate_change (args->insn, mem, args->replacement, 1);
10386 return 0;
10389 static void
10390 replace_loop_mems (insn, mem, reg)
10391 rtx insn;
10392 rtx mem;
10393 rtx reg;
10395 loop_replace_args args;
10397 args.insn = insn;
10398 args.match = mem;
10399 args.replacement = reg;
10401 for_each_rtx (&insn, replace_loop_mem, &args);
10404 /* Replace one register with another. Called through for_each_rtx; PX points
10405 to the rtx being scanned. DATA is actually a pointer to
10406 a structure of arguments. */
10408 static int
10409 replace_loop_reg (px, data)
10410 rtx *px;
10411 void *data;
10413 rtx x = *px;
10414 loop_replace_args *args = (loop_replace_args *) data;
10416 if (x == NULL_RTX)
10417 return 0;
10419 if (x == args->match)
10420 validate_change (args->insn, px, args->replacement, 1);
10422 return 0;
10425 static void
10426 replace_loop_regs (insn, reg, replacement)
10427 rtx insn;
10428 rtx reg;
10429 rtx replacement;
10431 loop_replace_args args;
10433 args.insn = insn;
10434 args.match = reg;
10435 args.replacement = replacement;
10437 for_each_rtx (&insn, replace_loop_reg, &args);
10440 /* Replace occurrences of the old exit label for the loop with the new
10441 one. DATA is an rtx_pair containing the old and new labels,
10442 respectively. */
10444 static int
10445 replace_label (x, data)
10446 rtx *x;
10447 void *data;
10449 rtx l = *x;
10450 rtx old_label = ((rtx_pair *) data)->r1;
10451 rtx new_label = ((rtx_pair *) data)->r2;
10453 if (l == NULL_RTX)
10454 return 0;
10456 if (GET_CODE (l) != LABEL_REF)
10457 return 0;
10459 if (XEXP (l, 0) != old_label)
10460 return 0;
10462 XEXP (l, 0) = new_label;
10463 ++LABEL_NUSES (new_label);
10464 --LABEL_NUSES (old_label);
10466 return 0;
10469 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10470 (ignored in the interim). */
10472 static rtx
10473 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10474 const struct loop *loop ATTRIBUTE_UNUSED;
10475 basic_block where_bb ATTRIBUTE_UNUSED;
10476 rtx where_insn;
10477 rtx pattern;
10479 return emit_insn_after (pattern, where_insn);
10483 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10484 in basic block WHERE_BB (ignored in the interim) within the loop
10485 otherwise hoist PATTERN into the loop pre-header. */
10487 rtx
10488 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10489 const struct loop *loop;
10490 basic_block where_bb ATTRIBUTE_UNUSED;
10491 rtx where_insn;
10492 rtx pattern;
10494 if (! where_insn)
10495 return loop_insn_hoist (loop, pattern);
10496 return emit_insn_before (pattern, where_insn);
10500 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10501 WHERE_BB (ignored in the interim) within the loop. */
10503 static rtx
10504 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10505 const struct loop *loop ATTRIBUTE_UNUSED;
10506 basic_block where_bb ATTRIBUTE_UNUSED;
10507 rtx where_insn;
10508 rtx pattern;
10510 return emit_call_insn_before (pattern, where_insn);
10514 /* Hoist insn for PATTERN into the loop pre-header. */
10516 rtx
10517 loop_insn_hoist (loop, pattern)
10518 const struct loop *loop;
10519 rtx pattern;
10521 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10525 /* Hoist call insn for PATTERN into the loop pre-header. */
10527 static rtx
10528 loop_call_insn_hoist (loop, pattern)
10529 const struct loop *loop;
10530 rtx pattern;
10532 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10536 /* Sink insn for PATTERN after the loop end. */
10538 rtx
10539 loop_insn_sink (loop, pattern)
10540 const struct loop *loop;
10541 rtx pattern;
10543 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10546 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10547 and a constant.  Emit a sequence of instructions to load it into REG.  */
10548 static rtx
10549 gen_load_of_final_value (reg, final_value)
10550 rtx reg, final_value;
10552 rtx seq;
10553 start_sequence ();
10554 final_value = force_operand (final_value, reg);
10555 if (final_value != reg)
10556 emit_move_insn (reg, final_value);
10557 seq = get_insns ();
10558 end_sequence ();
10559 return seq;
10562 /* If the loop has multiple exits, emit insn for PATTERN before the
10563 loop to ensure that it will always be executed no matter how the
10564 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10565 since this is slightly more efficient. */
10567 static rtx
10568 loop_insn_sink_or_swim (loop, pattern)
10569 const struct loop *loop;
10570 rtx pattern;
10572 if (loop->exit_count)
10573 return loop_insn_hoist (loop, pattern);
10574 else
10575 return loop_insn_sink (loop, pattern);
10578 static void
10579 loop_ivs_dump (loop, file, verbose)
10580 const struct loop *loop;
10581 FILE *file;
10582 int verbose;
10584 struct iv_class *bl;
10585 int iv_num = 0;
10587 if (! loop || ! file)
10588 return;
10590 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10591 iv_num++;
10593 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10595 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10597 loop_iv_class_dump (bl, file, verbose);
10598 fputc ('\n', file);
10603 static void
10604 loop_iv_class_dump (bl, file, verbose)
10605 const struct iv_class *bl;
10606 FILE *file;
10607 int verbose ATTRIBUTE_UNUSED;
10609 struct induction *v;
10610 rtx incr;
10611 int i;
10613 if (! bl || ! file)
10614 return;
10616 fprintf (file, "IV class for reg %d, benefit %d\n",
10617 bl->regno, bl->total_benefit);
10619 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10620 if (bl->initial_value)
10622 fprintf (file, ", init val: ");
10623 print_simple_rtl (file, bl->initial_value);
10625 if (bl->initial_test)
10627 fprintf (file, ", init test: ");
10628 print_simple_rtl (file, bl->initial_test);
10630 fputc ('\n', file);
10632 if (bl->final_value)
10634 fprintf (file, " Final val: ");
10635 print_simple_rtl (file, bl->final_value);
10636 fputc ('\n', file);
10639 if ((incr = biv_total_increment (bl)))
10641 fprintf (file, " Total increment: ");
10642 print_simple_rtl (file, incr);
10643 fputc ('\n', file);
10646 /* List the increments. */
10647 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10649 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10650 print_simple_rtl (file, v->add_val);
10651 fputc ('\n', file);
10654 /* List the givs. */
10655 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10657 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10658 i, INSN_UID (v->insn), v->benefit);
10659 if (v->giv_type == DEST_ADDR)
10660 print_simple_rtl (file, v->mem);
10661 else
10662 print_simple_rtl (file, single_set (v->insn));
10663 fputc ('\n', file);
10668 static void
10669 loop_biv_dump (v, file, verbose)
10670 const struct induction *v;
10671 FILE *file;
10672 int verbose;
10674 if (! v || ! file)
10675 return;
10677 fprintf (file,
10678 "Biv %d: insn %d",
10679 REGNO (v->dest_reg), INSN_UID (v->insn));
10680 fprintf (file, " const ");
10681 print_simple_rtl (file, v->add_val);
10683 if (verbose && v->final_value)
10685 fputc ('\n', file);
10686 fprintf (file, " final ");
10687 print_simple_rtl (file, v->final_value);
10690 fputc ('\n', file);
10694 static void
10695 loop_giv_dump (v, file, verbose)
10696 const struct induction *v;
10697 FILE *file;
10698 int verbose;
10700 if (! v || ! file)
10701 return;
10703 if (v->giv_type == DEST_REG)
10704 fprintf (file, "Giv %d: insn %d",
10705 REGNO (v->dest_reg), INSN_UID (v->insn));
10706 else
10707 fprintf (file, "Dest address: insn %d",
10708 INSN_UID (v->insn));
10710 fprintf (file, " src reg %d benefit %d",
10711 REGNO (v->src_reg), v->benefit);
10712 fprintf (file, " lifetime %d",
10713 v->lifetime);
10715 if (v->replaceable)
10716 fprintf (file, " replaceable");
10718 if (v->no_const_addval)
10719 fprintf (file, " ncav");
10721 if (v->ext_dependent)
10723 switch (GET_CODE (v->ext_dependent))
10725 case SIGN_EXTEND:
10726 fprintf (file, " ext se");
10727 break;
10728 case ZERO_EXTEND:
10729 fprintf (file, " ext ze");
10730 break;
10731 case TRUNCATE:
10732 fprintf (file, " ext tr");
10733 break;
10734 default:
10735 abort ();
10739 fputc ('\n', file);
10740 fprintf (file, " mult ");
10741 print_simple_rtl (file, v->mult_val);
10743 fputc ('\n', file);
10744 fprintf (file, " add ");
10745 print_simple_rtl (file, v->add_val);
10747 if (verbose && v->final_value)
10749 fputc ('\n', file);
10750 fprintf (file, " final ");
10751 print_simple_rtl (file, v->final_value);
10754 fputc ('\n', file);
10758 void
10759 debug_ivs (loop)
10760 const struct loop *loop;
10762 loop_ivs_dump (loop, stderr, 1);
10766 void
10767 debug_iv_class (bl)
10768 const struct iv_class *bl;
10770 loop_iv_class_dump (bl, stderr, 1);
10774 void
10775 debug_biv (v)
10776 const struct induction *v;
10778 loop_biv_dump (v, stderr, 1);
10782 void
10783 debug_giv (v)
10784 const struct induction *v;
10786 loop_giv_dump (v, stderr, 1);
10790 #define LOOP_BLOCK_NUM_1(INSN) \
10791 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10793 /* The notes do not have an assigned block, so look at the next insn. */
10794 #define LOOP_BLOCK_NUM(INSN) \
10795 ((INSN) ? (GET_CODE (INSN) == NOTE \
10796 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10797 : LOOP_BLOCK_NUM_1 (INSN)) \
10798 : -1)
10800 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10802 static void
10803 loop_dump_aux (loop, file, verbose)
10804 const struct loop *loop;
10805 FILE *file;
10806 int verbose ATTRIBUTE_UNUSED;
10808 rtx label;
10810 if (! loop || ! file)
10811 return;
10813 /* Print diagnostics to compare our concept of a loop with
10814 what the loop notes say. */
10815 if (! PREV_INSN (loop->first->head)
10816 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10817 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10818 != NOTE_INSN_LOOP_BEG)
10819 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10820 INSN_UID (PREV_INSN (loop->first->head)));
10821 if (! NEXT_INSN (loop->last->end)
10822 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10823 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10824 != NOTE_INSN_LOOP_END)
10825 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10826 INSN_UID (NEXT_INSN (loop->last->end)));
10828 if (loop->start)
10830 fprintf (file,
10831 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10832 LOOP_BLOCK_NUM (loop->start),
10833 LOOP_INSN_UID (loop->start),
10834 LOOP_BLOCK_NUM (loop->cont),
10835 LOOP_INSN_UID (loop->cont),
10836 LOOP_BLOCK_NUM (loop->cont),
10837 LOOP_INSN_UID (loop->cont),
10838 LOOP_BLOCK_NUM (loop->vtop),
10839 LOOP_INSN_UID (loop->vtop),
10840 LOOP_BLOCK_NUM (loop->end),
10841 LOOP_INSN_UID (loop->end));
10842 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10843 LOOP_BLOCK_NUM (loop->top),
10844 LOOP_INSN_UID (loop->top),
10845 LOOP_BLOCK_NUM (loop->scan_start),
10846 LOOP_INSN_UID (loop->scan_start));
10847 fprintf (file, ";; exit_count %d", loop->exit_count);
10848 if (loop->exit_count)
10850 fputs (", labels:", file);
10851 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10853 fprintf (file, " %d ",
10854 LOOP_INSN_UID (XEXP (label, 0)));
10857 fputs ("\n", file);
10859 /* This can happen when a marked loop appears as two nested loops,
10860 say from while (a || b) {}. The inner loop won't match
10861 the loop markers but the outer one will. */
10862 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10863 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10867 /* Call this function from the debugger to dump LOOP. */
10869 void
10870 debug_loop (loop)
10871 const struct loop *loop;
10873 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10876 /* Call this function from the debugger to dump LOOPS. */
10878 void
10879 debug_loops (loops)
10880 const struct loops *loops;
10882 flow_loops_dump (loops, stderr, loop_dump_aux, 1);