1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
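/* As a source-level sketch of the transformations described above
   (illustrative C only; the names are invented and this is not code
   from the pass itself), a loop such as

       for (i = 0; i < n; i++)
         a[i] = x * y;          -- x * y recomputed although invariant

   becomes, after invariant motion and strength reduction, roughly

       t = x * y;               -- invariant hoisted before the loop
       p = &a[0];               -- giv standing for the address a + i*4
       for (i = 0; i < n; i++)
         *p++ = t;              -- the i*4 multiply reduced to a pointer step

   Here i is a basic induction variable and the address of a[i] is a
   general induction variable derived from it. */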
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "hard-reg-set.h"
44 #include "basic-block.h"
45 #include "insn-config.h"
46 #include "regs.h"
47 #include "recog.h"
48 #include "flags.h"
49 #include "real.h"
50 #include "loop.h"
51 #include "cselib.h"
52 #include "except.h"
53 #include "toplev.h"
54 #include "predict.h"
55 #include "insn-flags.h"
56 #include "optabs.h"
58 /* Not really meaningful values, but at least something. */
59 #ifndef SIMULTANEOUS_PREFETCHES
60 #define SIMULTANEOUS_PREFETCHES 3
61 #endif
62 #ifndef PREFETCH_BLOCK
63 #define PREFETCH_BLOCK 32
64 #endif
65 #ifndef HAVE_prefetch
66 #define HAVE_prefetch 0
67 #define CODE_FOR_prefetch 0
68 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
69 #endif
71 /* Give up the prefetch optimizations once we exceed a given threshold.
72 It is unlikely that we would be able to optimize something in a loop
73 with so many detected prefetches. */
74 #define MAX_PREFETCHES 100
75 /* The number of prefetch blocks that are beneficial to fetch at once before
76 a loop with a known (and low) iteration count. */
77 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
78 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
79 since it is likely that the data are already in the cache. */
80 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* Parameterize some prefetch heuristics so they can be turned on and off
83 easily for performance testing on new architectures. These can be
84 defined in target-dependent files; see the sketch after these defaults. */
86 /* Prefetch is worthwhile only when loads/stores are dense. */
87 #ifndef PREFETCH_ONLY_DENSE_MEM
88 #define PREFETCH_ONLY_DENSE_MEM 1
89 #endif
91 /* Define what we mean by "dense" loads and stores; this value divided by 256
92 is the minimum fraction of memory references that are worth prefetching. */
93 #ifndef PREFETCH_DENSE_MEM
94 #define PREFETCH_DENSE_MEM 220
95 #endif
97 /* Do not prefetch for a loop whose iteration count is known to be low. */
98 #ifndef PREFETCH_NO_LOW_LOOPCNT
99 #define PREFETCH_NO_LOW_LOOPCNT 1
100 #endif
102 /* Define what we mean by a "low" iteration count. */
103 #ifndef PREFETCH_LOW_LOOPCNT
104 #define PREFETCH_LOW_LOOPCNT 32
105 #endif
107 /* Do not prefetch for a loop that contains a function call; such a loop is
108 probably not an internal loop. */
109 #ifndef PREFETCH_NO_CALL
110 #define PREFETCH_NO_CALL 1
111 #endif
113 /* Do not prefetch accesses with an extreme stride. */
114 #ifndef PREFETCH_NO_EXTREME_STRIDE
115 #define PREFETCH_NO_EXTREME_STRIDE 1
116 #endif
118 /* Define what we mean by an "extreme" stride. */
119 #ifndef PREFETCH_EXTREME_STRIDE
120 #define PREFETCH_EXTREME_STRIDE 4096
121 #endif
123 /* Define a limit to how far apart indices can be and still be merged
124 into a single prefetch. */
125 #ifndef PREFETCH_EXTREME_DIFFERENCE
126 #define PREFETCH_EXTREME_DIFFERENCE 4096
127 #endif
129 /* Issue prefetch instructions before the loop to fetch data to be used
130 in the first few loop iterations. */
131 #ifndef PREFETCH_BEFORE_LOOP
132 #define PREFETCH_BEFORE_LOOP 1
133 #endif
135 /* Do not handle reversed order prefetches (negative stride). */
136 #ifndef PREFETCH_NO_REVERSE_ORDER
137 #define PREFETCH_NO_REVERSE_ORDER 1
138 #endif
140 /* Prefetch even if the GIV is in conditional code. */
141 #ifndef PREFETCH_CONDITIONAL
142 #define PREFETCH_CONDITIONAL 1
143 #endif
145 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
146 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
148 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
149 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
150 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
152 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
153 ((REGNO) < FIRST_PSEUDO_REGISTER \
154 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
157 /* Vector mapping INSN_UIDs to luids.
158 The luids are like uids but always increase monotonically.
159 We use them to see whether a jump comes from outside a given loop. */
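/* For example (uid values invented): if insns with uids 40, 12 and 57
   appear in that order in the insn stream, they receive luids 1, 2 and 3;
   comparing luids then answers "which insn comes first?" even though the
   uids themselves were assigned in creation order. */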
161 int *uid_luid;
163 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
164 number the insn is contained in. */
166 struct loop **uid_loop;
168 /* 1 + largest uid of any insn. */
170 int max_uid_for_loop;
172 /* 1 + luid of last insn. */
174 static int max_luid;
176 /* Number of loops detected in current function. Used as index to the
177 next few tables. */
179 static int max_loop_num;
181 /* Bound on pseudo register number before loop optimization.
182 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
183 unsigned int max_reg_before_loop;
185 /* The value to pass to the next call of reg_scan_update. */
186 static int loop_max_reg;
188 /* During the analysis of a loop, a chain of `struct movable's
189 is made to record all the movable insns found.
190 Then the entire chain can be scanned to decide which to move. */
192 struct movable
193 {
194 rtx insn; /* A movable insn */
195 rtx set_src; /* The expression this reg is set from. */
196 rtx set_dest; /* The destination of this SET. */
197 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
198 of any registers used within the LIBCALL. */
199 int consec; /* Number of consecutive following insns
200 that must be moved with this one. */
201 unsigned int regno; /* The register it sets */
202 short lifetime; /* lifetime of that register;
203 may be adjusted when matching movables
204 that load the same value are found. */
205 short savings; /* Number of insns we can move for this reg,
206 including other movables that force this
207 or match this one. */
208 unsigned int cond : 1; /* 1 if only conditionally movable */
209 unsigned int force : 1; /* 1 means MUST move this insn */
210 unsigned int global : 1; /* 1 means reg is live outside this loop */
211 /* If PARTIAL is 1, GLOBAL means something different:
212 that the reg is live outside the range from where it is set
213 to the following label. */
214 unsigned int done : 1; /* 1 inhibits further processing of this */
216 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
217 In particular, moving it does not make it
218 invariant. */
219 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
220 load SRC, rather than copying INSN. */
221 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
222 first insn of a consecutive sets group. */
223 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
224 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
225 that we should avoid changing when clearing
226 the rest of the reg. */
227 struct movable *match; /* First entry for same value */
228 struct movable *forces; /* An insn that must be moved if this is */
229 struct movable *next;
230 };
233 FILE *loop_dump_stream;
235 /* Forward declarations. */
237 static void invalidate_loops_containing_label PARAMS ((rtx));
238 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
239 static void mark_loop_jump PARAMS ((rtx, struct loop *));
240 static void prescan_loop PARAMS ((struct loop *));
241 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
242 static int consec_sets_invariant_p PARAMS ((const struct loop *,
243 rtx, int, rtx));
244 static int labels_in_range_p PARAMS ((rtx, int));
245 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
246 static void note_addr_stored PARAMS ((rtx, rtx, void *));
247 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
248 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
249 static void scan_loop PARAMS ((struct loop*, int));
250 #if 0
251 static void replace_call_address PARAMS ((rtx, rtx, rtx));
252 #endif
253 static rtx skip_consec_insns PARAMS ((rtx, int));
254 static int libcall_benefit PARAMS ((rtx));
255 static void ignore_some_movables PARAMS ((struct loop_movables *));
256 static void force_movables PARAMS ((struct loop_movables *));
257 static void combine_movables PARAMS ((struct loop_movables *,
258 struct loop_regs *));
259 static int num_unmoved_movables PARAMS ((const struct loop *));
260 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
261 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
262 struct loop_regs *));
263 static void add_label_notes PARAMS ((rtx, rtx));
264 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
265 int, int));
266 static void loop_movables_add PARAMS((struct loop_movables *,
267 struct movable *));
268 static void loop_movables_free PARAMS((struct loop_movables *));
269 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
270 static void loop_bivs_find PARAMS((struct loop *));
271 static void loop_bivs_init_find PARAMS((struct loop *));
272 static void loop_bivs_check PARAMS((struct loop *));
273 static void loop_givs_find PARAMS((struct loop *));
274 static void loop_givs_check PARAMS((struct loop *));
275 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
276 int, int));
277 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
278 struct induction *, rtx));
279 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
280 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
281 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
282 rtx *));
283 static void loop_ivs_free PARAMS((struct loop *));
284 static void strength_reduce PARAMS ((struct loop *, int));
285 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
286 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
287 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
288 static void record_biv PARAMS ((struct loop *, struct induction *,
289 rtx, rtx, rtx, rtx, rtx *,
290 int, int));
291 static void check_final_value PARAMS ((const struct loop *,
292 struct induction *));
293 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
294 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
295 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
296 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
297 static void record_giv PARAMS ((const struct loop *, struct induction *,
298 rtx, rtx, rtx, rtx, rtx, rtx, int,
299 enum g_types, int, int, rtx *));
300 static void update_giv_derive PARAMS ((const struct loop *, rtx));
301 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
302 struct loop_info *));
303 static int basic_induction_var PARAMS ((const struct loop *, rtx,
304 enum machine_mode, rtx, rtx,
305 rtx *, rtx *, rtx **));
306 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
307 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
308 rtx *, rtx *, rtx *, int, int *,
309 enum machine_mode));
310 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
311 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
312 static int check_dbra_loop PARAMS ((struct loop *, int));
313 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
314 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
315 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
316 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
317 static int product_cheap_p PARAMS ((rtx, rtx));
318 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
319 int, int, int));
320 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
321 struct iv_class *, int,
322 basic_block, rtx));
323 static int last_use_this_basic_block PARAMS ((rtx, rtx));
324 static void record_initial PARAMS ((rtx, rtx, void *));
325 static void update_reg_last_use PARAMS ((rtx, rtx));
326 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
327 static void loop_regs_scan PARAMS ((const struct loop *, int));
328 static int count_insns_in_loop PARAMS ((const struct loop *));
329 static void load_mems PARAMS ((const struct loop *));
330 static int insert_loop_mem PARAMS ((rtx *, void *));
331 static int replace_loop_mem PARAMS ((rtx *, void *));
332 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
333 static int replace_loop_reg PARAMS ((rtx *, void *));
334 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
335 static void note_reg_stored PARAMS ((rtx, rtx, void *));
336 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
337 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
338 unsigned int));
339 static int replace_label PARAMS ((rtx *, void *));
340 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
341 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
342 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
343 static void loop_regs_update PARAMS ((const struct loop *, rtx));
344 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
346 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
347 rtx, rtx));
348 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
349 basic_block, rtx, rtx));
350 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
351 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
353 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
354 static void loop_delete_insns PARAMS ((rtx, rtx));
355 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
356 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
357 void debug_ivs PARAMS ((const struct loop *));
358 void debug_iv_class PARAMS ((const struct iv_class *));
359 void debug_biv PARAMS ((const struct induction *));
360 void debug_giv PARAMS ((const struct induction *));
361 void debug_loop PARAMS ((const struct loop *));
362 void debug_loops PARAMS ((const struct loops *));
364 typedef struct rtx_pair
365 {
366 rtx r1;
367 rtx r2;
368 } rtx_pair;
370 typedef struct loop_replace_args
371 {
372 rtx match;
373 rtx replacement;
374 rtx insn;
375 } loop_replace_args;
377 /* Nonzero iff INSN is between START and END, inclusive. */
378 #define INSN_IN_RANGE_P(INSN, START, END) \
379 (INSN_UID (INSN) < max_uid_for_loop \
380 && INSN_LUID (INSN) >= INSN_LUID (START) \
381 && INSN_LUID (INSN) <= INSN_LUID (END))
383 /* Indirect_jump_in_function is computed once per function. */
384 static int indirect_jump_in_function;
385 static int indirect_jump_in_function_p PARAMS ((rtx));
387 static int compute_luids PARAMS ((rtx, rtx, int));
389 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
390 struct induction *,
391 rtx));
393 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
394 copy the value of the strength reduced giv to its original register. */
395 static int copy_cost;
397 /* Cost of using a register, to normalize the benefits of a giv. */
398 static int reg_address_cost;
400 void
401 init_loop ()
403 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
405 reg_address_cost = address_cost (reg, SImode);
407 copy_cost = COSTS_N_INSNS (1);
410 /* Compute the mapping from uids to luids.
411 LUIDs are numbers assigned to insns, like uids,
412 except that luids increase monotonically through the code.
413 Start at insn START and stop just before END. Assign LUIDs
414 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
415 static int
416 compute_luids (start, end, prev_luid)
417 rtx start, end;
418 int prev_luid;
420 int i;
421 rtx insn;
423 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
425 if (INSN_UID (insn) >= max_uid_for_loop)
426 continue;
427 /* Don't assign luids to line-number NOTEs, so that the distance in
428 luids between two insns is not affected by -g. */
429 if (GET_CODE (insn) != NOTE
430 || NOTE_LINE_NUMBER (insn) <= 0)
431 uid_luid[INSN_UID (insn)] = ++i;
432 else
433 /* Give a line number note the same luid as the preceding insn. */
434 uid_luid[INSN_UID (insn)] = i;
436 return i + 1;
439 /* Entry point of this file. Perform loop optimization
440 on the current function. F is the first insn of the function
441 and DUMPFILE is a stream for output of a trace of actions taken
442 (or 0 if none should be output). */
444 void
445 loop_optimize (f, dumpfile, flags)
446 /* f is the first instruction of a chain of insns for one function */
447 rtx f;
448 FILE *dumpfile;
449 int flags;
451 rtx insn;
452 int i;
453 struct loops loops_data;
454 struct loops *loops = &loops_data;
455 struct loop_info *loops_info;
457 loop_dump_stream = dumpfile;
459 init_recog_no_volatile ();
461 max_reg_before_loop = max_reg_num ();
462 loop_max_reg = max_reg_before_loop;
464 regs_may_share = 0;
466 /* Count the number of loops. */
468 max_loop_num = 0;
469 for (insn = f; insn; insn = NEXT_INSN (insn))
471 if (GET_CODE (insn) == NOTE
472 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
473 max_loop_num++;
476 /* Don't waste time if no loops. */
477 if (max_loop_num == 0)
478 return;
480 loops->num = max_loop_num;
482 /* Get size to use for tables indexed by uids.
483 Leave some space for labels allocated by find_and_verify_loops. */
484 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
486 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
487 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
488 sizeof (struct loop *));
490 /* Allocate storage for array of loops. */
491 loops->array = (struct loop *)
492 xcalloc (loops->num, sizeof (struct loop));
494 /* Find and process each loop.
495 First, find them, and record them in order of their beginnings. */
496 find_and_verify_loops (f, loops);
498 /* Allocate and initialize auxiliary loop information. */
499 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
500 for (i = 0; i < loops->num; i++)
501 loops->array[i].aux = loops_info + i;
503 /* Now find all register lifetimes. This must be done after
504 find_and_verify_loops, because it might reorder the insns in the
505 function. */
506 reg_scan (f, max_reg_before_loop, 1);
508 /* This must occur after reg_scan so that registers created by gcse
509 will have entries in the register tables.
511 We could have added a call to reg_scan after gcse_main in toplev.c,
512 but moving this call to init_alias_analysis is more efficient. */
513 init_alias_analysis ();
515 /* See if we went too far. Note that get_max_uid already returns
516 one more than the maximum uid of all insns. */
517 if (get_max_uid () > max_uid_for_loop)
518 abort ();
519 /* Now reset it to the actual size we need. See above. */
520 max_uid_for_loop = get_max_uid ();
522 /* find_and_verify_loops has already called compute_luids, but it
523 might have rearranged code afterwards, so we need to recompute
524 the luids now. */
525 max_luid = compute_luids (f, NULL_RTX, 0);
527 /* Don't leave gaps in uid_luid for insns that have been
528 deleted. It is possible that the first or last insn
529 using some register has been deleted by cross-jumping.
530 Make sure that uid_luid for that former insn's uid
531 points to the general area where that insn used to be. */
532 for (i = 0; i < max_uid_for_loop; i++)
534 uid_luid[0] = uid_luid[i];
535 if (uid_luid[0] != 0)
536 break;
538 for (i = 0; i < max_uid_for_loop; i++)
539 if (uid_luid[i] == 0)
540 uid_luid[i] = uid_luid[i - 1];
542 /* Determine if the function has an indirect jump. On some systems
543 this prevents low overhead loop instructions from being used. */
544 indirect_jump_in_function = indirect_jump_in_function_p (f);
546 /* Now scan the loops, last ones first, since this means inner ones are done
547 before outer ones. */
548 for (i = max_loop_num - 1; i >= 0; i--)
550 struct loop *loop = &loops->array[i];
552 if (! loop->invalid && loop->end)
553 scan_loop (loop, flags);
556 end_alias_analysis ();
558 /* Clean up. */
559 free (uid_luid);
560 free (uid_loop);
561 free (loops_info);
562 free (loops->array);
565 /* Returns the next insn, in execution order, after INSN. START and
566 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
567 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
568 insn-stream; it is used with loops that are entered near the
569 bottom. */
571 static rtx
572 next_insn_in_loop (loop, insn)
573 const struct loop *loop;
574 rtx insn;
576 insn = NEXT_INSN (insn);
578 if (insn == loop->end)
580 if (loop->top)
581 /* Go to the top of the loop, and continue there. */
582 insn = loop->top;
583 else
584 /* We're done. */
585 insn = NULL_RTX;
588 if (insn == loop->scan_start)
589 /* We're done. */
590 insn = NULL_RTX;
592 return insn;
595 /* Optimize one loop described by LOOP. */
597 /* ??? Could also move memory writes out of loops if the destination address
598 is invariant, the source is invariant, the memory write is not volatile,
599 and if we can prove that no read inside the loop can read this address
600 before the write occurs. If there is a read of this address after the
601 write, then we can also mark the memory read as invariant. */
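/* For instance (an illustrative sketch, not a transformation this file
   performs), in

       for (i = 0; i < n; i++)
         {
           *flag = 1;        -- invariant address, invariant source
           b[i] = a[i];      -- no read of *flag before the write
         }

   the store to *flag could be done once outside the loop, provided the
   store is not volatile and nothing in the loop can read or clobber
   *flag through another pointer. */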
603 static void
604 scan_loop (loop, flags)
605 struct loop *loop;
606 int flags;
608 struct loop_info *loop_info = LOOP_INFO (loop);
609 struct loop_regs *regs = LOOP_REGS (loop);
610 int i;
611 rtx loop_start = loop->start;
612 rtx loop_end = loop->end;
613 rtx p;
614 /* 1 if we are scanning insns that could be executed zero times. */
615 int maybe_never = 0;
616 /* 1 if we are scanning insns that might never be executed
617 due to a subroutine call which might exit before they are reached. */
618 int call_passed = 0;
619 /* Jump insn that enters the loop, or 0 if control drops in. */
620 rtx loop_entry_jump = 0;
621 /* Number of insns in the loop. */
622 int insn_count;
623 int tem;
624 rtx temp, update_start, update_end;
625 /* The SET from an insn, if it is the only SET in the insn. */
626 rtx set, set1;
627 /* Chain describing insns movable in current loop. */
628 struct loop_movables *movables = LOOP_MOVABLES (loop);
629 /* Ratio of extra register life span we can justify
630 for saving an instruction. More if loop doesn't call subroutines
631 since in that case saving an insn makes more difference
632 and more registers are available. */
633 int threshold;
634 /* Nonzero if we are scanning instructions in a sub-loop. */
635 int loop_depth = 0;
636 int in_libcall;
638 loop->top = 0;
640 movables->head = 0;
641 movables->last = 0;
643 /* Determine whether this loop starts with a jump down to a test at
644 the end. This will occur for a small number of loops with a test
645 that is too complex to duplicate in front of the loop.
647 We search for the first insn or label in the loop, skipping NOTEs.
648 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
649 (because we might have a loop executed only once that contains a
650 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
651 (in case we have a degenerate loop).
653 Note that if we mistakenly think that a loop is entered at the top
654 when, in fact, it is entered at the exit test, the only effect will be
655 slightly poorer optimization. Making the opposite error can generate
656 incorrect code. Since very few loops now start with a jump to the
657 exit test, the code here to detect that case is very conservative. */
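/* As an illustrative shape (pseudo-assembly, not literal RTL), a source
   loop such as

       while (i < n)
         i = f (i);

   may be emitted with its test at the bottom and an entry jump down to it:

           jump L2              -- the entry jump detected below
       L1: i = f (i);           -- body; LOOP->TOP will point at L1
       L2: if (i < n) goto L1   -- exit test; becomes LOOP->SCAN_START

   so scanning starts at the exit test and wraps around through L1. */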
659 for (p = NEXT_INSN (loop_start);
660 p != loop_end
661 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
662 && (GET_CODE (p) != NOTE
663 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
664 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
665 p = NEXT_INSN (p))
668 loop->scan_start = p;
670 /* If loop end is the end of the current function, then emit a
671 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
672 note insn. This is the position we use when sinking insns out of
673 the loop. */
674 if (NEXT_INSN (loop->end) != 0)
675 loop->sink = NEXT_INSN (loop->end);
676 else
677 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
679 /* Set up variables describing this loop. */
680 prescan_loop (loop);
681 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
683 /* If loop has a jump before the first label,
684 the true entry is the target of that jump.
685 Start scan from there.
686 But record in LOOP->TOP the place where the end-test jumps
687 back to so we can scan that after the end of the loop. */
688 if (GET_CODE (p) == JUMP_INSN)
690 loop_entry_jump = p;
692 /* Loop entry must be an unconditional jump (and not a RETURN). */
693 if (any_uncondjump_p (p)
694 && JUMP_LABEL (p) != 0
695 /* Check to see whether the jump actually
696 jumps out of the loop (meaning it's no loop).
697 This case can happen for things like
698 do {..} while (0). If this label was generated previously
699 by loop, we can't tell anything about it and have to reject
700 the loop. */
701 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
703 loop->top = next_label (loop->scan_start);
704 loop->scan_start = JUMP_LABEL (p);
708 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
709 as required by loop_reg_used_before_p. So skip such loops. (This
710 test may never be true, but it's best to play it safe.)
712 Also, skip loops where we do not start scanning at a label. This
713 test also rejects loops starting with a JUMP_INSN that failed the
714 test above. */
716 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
717 || GET_CODE (loop->scan_start) != CODE_LABEL)
719 if (loop_dump_stream)
720 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
721 INSN_UID (loop_start), INSN_UID (loop_end));
722 return;
725 /* Allocate extra space for REGs that might be created by load_mems.
726 We allocate a little extra slop as well, in the hopes that we
727 won't have to reallocate the regs array. */
728 loop_regs_scan (loop, loop_info->mems_idx + 16);
729 insn_count = count_insns_in_loop (loop);
731 if (loop_dump_stream)
733 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
734 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
735 if (loop->cont)
736 fprintf (loop_dump_stream, "Continue at insn %d.\n",
737 INSN_UID (loop->cont));
740 /* Scan through the loop finding insns that are safe to move.
741 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
742 this reg will be considered invariant for subsequent insns.
743 We consider whether subsequent insns use the reg
744 in deciding whether it is worth actually moving.
746 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
747 and therefore it is possible that the insns we are scanning
748 would never be executed. At such times, we must make sure
749 that it is safe to execute the insn once instead of zero times.
750 When MAYBE_NEVER is 0, all insns will be executed at least once
751 so that is not a problem. */
753 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
754 p != NULL_RTX;
755 p = next_insn_in_loop (loop, p))
757 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
758 in_libcall--;
759 if (GET_CODE (p) == INSN)
761 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
762 if (temp)
763 in_libcall++;
764 if (! in_libcall
765 && (set = single_set (p))
766 && GET_CODE (SET_DEST (set)) == REG
767 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
768 && SET_DEST (set) != pic_offset_table_rtx
769 #endif
770 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
772 int tem1 = 0;
773 int tem2 = 0;
774 int move_insn = 0;
775 rtx src = SET_SRC (set);
776 rtx dependencies = 0;
778 /* Figure out what to use as a source of this insn. If a
779 REG_EQUIV note is given or if a REG_EQUAL note with a
780 constant operand is specified, use it as the source and
781 mark that we should move this insn by calling
782 emit_move_insn rather that duplicating the insn.
784 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
785 note is present. */
786 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
787 if (temp)
788 src = XEXP (temp, 0), move_insn = 1;
789 else
791 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
792 if (temp && CONSTANT_P (XEXP (temp, 0)))
793 src = XEXP (temp, 0), move_insn = 1;
794 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
796 src = XEXP (temp, 0);
797 /* A libcall block can use regs that don't appear in
798 the equivalent expression. To move the libcall,
799 we must move those regs too. */
800 dependencies = libcall_other_reg (p, src);
804 /* For parallels, add any possible uses to the dependencies, as
805 we can't move the insn without resolving them first. */
806 if (GET_CODE (PATTERN (p)) == PARALLEL)
808 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
810 rtx x = XVECEXP (PATTERN (p), 0, i);
811 if (GET_CODE (x) == USE)
812 dependencies
813 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
814 dependencies);
818 /* Don't try to optimize a register that was made
819 by loop-optimization for an inner loop.
820 We don't know its life-span, so we can't compute
821 the benefit. */
822 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
824 else if (/* The register is used in basic blocks other
825 than the one where it is set (meaning that
826 something after this point in the loop might
827 depend on its value before the set). */
828 ! reg_in_basic_block_p (p, SET_DEST (set))
829 /* And the set is not guaranteed to be executed once
830 the loop starts, or the value before the set is
831 needed before the set occurs...
833 ??? Note we have quadratic behaviour here, mitigated
834 by the fact that the previous test will often fail for
835 large loops. Rather than re-scanning the entire loop
836 each time for register usage, we should build tables
837 of the register usage and use them here instead. */
838 && (maybe_never
839 || loop_reg_used_before_p (loop, set, p)))
840 /* It is unsafe to move the set.
842 This code used to consider it OK to move a set of a variable
843 which was not created by the user and not used in an exit
844 test.
845 That behavior is incorrect and was removed. */
847 else if ((tem = loop_invariant_p (loop, src))
848 && (dependencies == 0
849 || (tem2
850 = loop_invariant_p (loop, dependencies)) != 0)
851 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
852 || (tem1
853 = consec_sets_invariant_p
854 (loop, SET_DEST (set),
855 regs->array[REGNO (SET_DEST (set))].set_in_loop,
856 p)))
857 /* If the insn can cause a trap (such as divide by zero),
858 can't move it unless it's guaranteed to be executed
859 once loop is entered. Even a function call might
860 prevent the trap insn from being reached
861 (since it might exit!) */
862 && ! ((maybe_never || call_passed)
863 && may_trap_p (src)))
865 struct movable *m;
866 int regno = REGNO (SET_DEST (set));
868 /* A potential lossage is where we have a case where two insns
869 can be combined as long as they are both in the loop, but
870 we move one of them outside the loop. For large loops,
871 this can lose. The most common case of this is the address
872 of a function being called.
874 Therefore, if this register is marked as being used
875 exactly once if we are in a loop with calls
876 (a "large loop"), see if we can replace the usage of
877 this register with the source of this SET. If we can,
878 delete this insn.
880 Don't do this if P has a REG_RETVAL note or if we have
881 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
883 if (loop_info->has_call
884 && regs->array[regno].single_usage != 0
885 && regs->array[regno].single_usage != const0_rtx
886 && REGNO_FIRST_UID (regno) == INSN_UID (p)
887 && (REGNO_LAST_UID (regno)
888 == INSN_UID (regs->array[regno].single_usage))
889 && regs->array[regno].set_in_loop == 1
890 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
891 && ! side_effects_p (SET_SRC (set))
892 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
893 && (! SMALL_REGISTER_CLASSES
894 || (! (GET_CODE (SET_SRC (set)) == REG
895 && (REGNO (SET_SRC (set))
896 < FIRST_PSEUDO_REGISTER))))
897 /* This test is not redundant; SET_SRC (set) might be
898 a call-clobbered register and the life of REGNO
899 might span a call. */
900 && ! modified_between_p (SET_SRC (set), p,
901 regs->array[regno].single_usage)
902 && no_labels_between_p (p,
903 regs->array[regno].single_usage)
904 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
905 regs->array[regno].single_usage))
907 /* Replace any usage in a REG_EQUAL note. Must copy
908 the new source, so that we don't get rtx sharing
909 between the SET_SOURCE and REG_NOTES of insn p. */
910 REG_NOTES (regs->array[regno].single_usage)
911 = (replace_rtx
912 (REG_NOTES (regs->array[regno].single_usage),
913 SET_DEST (set), copy_rtx (SET_SRC (set))));
915 delete_insn (p);
916 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
917 i++)
918 regs->array[regno+i].set_in_loop = 0;
919 continue;
922 m = (struct movable *) xmalloc (sizeof (struct movable));
923 m->next = 0;
924 m->insn = p;
925 m->set_src = src;
926 m->dependencies = dependencies;
927 m->set_dest = SET_DEST (set);
928 m->force = 0;
929 m->consec
930 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
931 m->done = 0;
932 m->forces = 0;
933 m->partial = 0;
934 m->move_insn = move_insn;
935 m->move_insn_first = 0;
936 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
937 m->savemode = VOIDmode;
938 m->regno = regno;
939 /* Set M->cond if either loop_invariant_p
940 or consec_sets_invariant_p returned 2
941 (only conditionally invariant). */
942 m->cond = ((tem | tem1 | tem2) > 1);
943 m->global = LOOP_REG_GLOBAL_P (loop, regno);
944 m->match = 0;
945 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
946 m->savings = regs->array[regno].n_times_set;
947 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
948 m->savings += libcall_benefit (p);
949 for (i = 0; i < (int) LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
950 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
951 /* Add M to the end of the chain MOVABLES. */
952 loop_movables_add (movables, m);
954 if (m->consec > 0)
956 /* It is possible for the first instruction to have a
957 REG_EQUAL note but a non-invariant SET_SRC, so we must
958 remember the status of the first instruction in case
959 the last instruction doesn't have a REG_EQUAL note. */
960 m->move_insn_first = m->move_insn;
962 /* Skip this insn, not checking REG_LIBCALL notes. */
963 p = next_nonnote_insn (p);
964 /* Skip the consecutive insns, if there are any. */
965 p = skip_consec_insns (p, m->consec);
966 /* Back up to the last insn of the consecutive group. */
967 p = prev_nonnote_insn (p);
969 /* We must now reset m->move_insn, m->is_equiv, and
970 possibly m->set_src to correspond to the effects of
971 all the insns. */
972 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
973 if (temp)
974 m->set_src = XEXP (temp, 0), m->move_insn = 1;
975 else
977 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
978 if (temp && CONSTANT_P (XEXP (temp, 0)))
979 m->set_src = XEXP (temp, 0), m->move_insn = 1;
980 else
981 m->move_insn = 0;
984 m->is_equiv
985 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
988 /* If this register is always set within a STRICT_LOW_PART
989 or set to zero, then its high bytes are constant.
990 So clear them outside the loop and within the loop
991 just load the low bytes.
992 We must check that the machine has an instruction to do so.
993 Also, if the value loaded into the register
994 depends on the same register, this cannot be done. */
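/* At the source level this corresponds to something like (an invented
   illustration, not code quoted from anywhere):

       unsigned short *p;
       unsigned int x;
       ...
       x = *p++;                -- zero-extending load, every iteration

   whose per-iteration RTL is the pair of sets tested for below:

       (set (reg:SI x) (const_int 0))
       (set (strict_low_part (subreg:HI (reg:SI x) 0)) (mem:HI ...))

   Only the clearing insn is hoisted; the low-part load stays in the
   loop, and the movable is marked PARTIAL. */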
995 else if (SET_SRC (set) == const0_rtx
996 && GET_CODE (NEXT_INSN (p)) == INSN
997 && (set1 = single_set (NEXT_INSN (p)))
998 && GET_CODE (set1) == SET
999 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1000 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1001 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1002 == SET_DEST (set))
1003 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1005 int regno = REGNO (SET_DEST (set));
1006 if (regs->array[regno].set_in_loop == 2)
1008 struct movable *m;
1009 m = (struct movable *) xmalloc (sizeof (struct movable));
1010 m->next = 0;
1011 m->insn = p;
1012 m->set_dest = SET_DEST (set);
1013 m->dependencies = 0;
1014 m->force = 0;
1015 m->consec = 0;
1016 m->done = 0;
1017 m->forces = 0;
1018 m->move_insn = 0;
1019 m->move_insn_first = 0;
1020 m->partial = 1;
1021 /* If the insn may not be executed on some cycles,
1022 we can't clear the whole reg; clear just high part.
1023 Not even if the reg is used only within this loop.
1024 Consider this:
1025 while (1)
1026 while (s != t) {
1027 if (foo ()) x = *s;
1028 use (x);
1030 Clearing x before the inner loop could clobber a value
1031 being saved from the last time around the outer loop.
1032 However, if the reg is not used outside this loop
1033 and all uses of the register are in the same
1034 basic block as the store, there is no problem.
1036 If this insn was made by loop, we don't know its
1037 INSN_LUID and hence must make a conservative
1038 assumption. */
1039 m->global = (INSN_UID (p) >= max_uid_for_loop
1040 || LOOP_REG_GLOBAL_P (loop, regno)
1041 || (labels_in_range_p
1042 (p, REGNO_FIRST_LUID (regno))));
1043 if (maybe_never && m->global)
1044 m->savemode = GET_MODE (SET_SRC (set1));
1045 else
1046 m->savemode = VOIDmode;
1047 m->regno = regno;
1048 m->cond = 0;
1049 m->match = 0;
1050 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1051 m->savings = 1;
1052 for (i = 0;
1053 i < (int) LOOP_REGNO_NREGS (regno, SET_DEST (set));
1054 i++)
1055 regs->array[regno+i].set_in_loop = -1;
1056 /* Add M to the end of the chain MOVABLES. */
1057 loop_movables_add (movables, m);
1062 /* Past a call insn, we get to insns which might not be executed
1063 because the call might exit. This matters for insns that trap.
1064 Constant and pure call insns always return, so they don't count. */
1065 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1066 call_passed = 1;
1067 /* Past a label or a jump, we get to insns for which we
1068 can't count on whether or how many times they will be
1069 executed during each iteration. Therefore, we can
1070 only move out sets of trivial variables
1071 (those not used after the loop). */
1072 /* Similar code appears twice in strength_reduce. */
1073 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1074 /* If we enter the loop in the middle, and scan around to the
1075 beginning, don't set maybe_never for that. This must be an
1076 unconditional jump, otherwise the code at the top of the
1077 loop might never be executed. Unconditional jumps are
1078 followed by a barrier then the loop_end. */
1079 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1080 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1081 && any_uncondjump_p (p)))
1082 maybe_never = 1;
1083 else if (GET_CODE (p) == NOTE)
1085 /* At the virtual top of a converted loop, insns are again known to
1086 be executed: logically, the loop begins here even though the exit
1087 code has been duplicated. */
1088 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1089 maybe_never = call_passed = 0;
1090 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1091 loop_depth++;
1092 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1093 loop_depth--;
1097 /* If one movable subsumes another, ignore that other. */
1099 ignore_some_movables (movables);
1101 /* For each movable insn, see if the reg that it loads
1102 leads when it dies right into another conditionally movable insn.
1103 If so, record that the second insn "forces" the first one,
1104 since the second can be moved only if the first is. */
1106 force_movables (movables);
1108 /* See if there are multiple movable insns that load the same value.
1109 If there are, make all but the first point at the first one
1110 through the `match' field, and add the priorities of them
1111 all together as the priority of the first. */
1113 combine_movables (movables, regs);
1115 /* Now consider each movable insn to decide whether it is worth moving.
1116 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1118 Generally this increases code size, so do not move movables when
1119 optimizing for code size. */
1121 if (! optimize_size)
1123 move_movables (loop, movables, threshold, insn_count);
1125 /* Recalculate regs->array if move_movables has created new
1126 registers. */
1127 if (max_reg_num () > regs->num)
1129 loop_regs_scan (loop, 0);
1130 for (update_start = loop_start;
1131 PREV_INSN (update_start)
1132 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1133 update_start = PREV_INSN (update_start))
1135 update_end = NEXT_INSN (loop_end);
1137 reg_scan_update (update_start, update_end, loop_max_reg);
1138 loop_max_reg = max_reg_num ();
1142 /* Now candidates that still are negative are those not moved.
1143 Change regs->array[I].set_in_loop to indicate that those are not actually
1144 invariant. */
1145 for (i = 0; i < regs->num; i++)
1146 if (regs->array[i].set_in_loop < 0)
1147 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1149 /* Now that we've moved some things out of the loop, we might be able to
1150 hoist even more memory references. */
1151 load_mems (loop);
1153 /* Recalculate regs->array if load_mems has created new registers. */
1154 if (max_reg_num () > regs->num)
1155 loop_regs_scan (loop, 0);
1157 for (update_start = loop_start;
1158 PREV_INSN (update_start)
1159 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1160 update_start = PREV_INSN (update_start))
1162 update_end = NEXT_INSN (loop_end);
1164 reg_scan_update (update_start, update_end, loop_max_reg);
1165 loop_max_reg = max_reg_num ();
1167 if (flag_strength_reduce)
1169 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1170 /* Ensure our label doesn't go away. */
1171 LABEL_NUSES (update_end)++;
1173 strength_reduce (loop, flags);
1175 reg_scan_update (update_start, update_end, loop_max_reg);
1176 loop_max_reg = max_reg_num ();
1178 if (update_end && GET_CODE (update_end) == CODE_LABEL
1179 && --LABEL_NUSES (update_end) == 0)
1180 delete_related_insns (update_end);
1184 /* The movable information is required for strength reduction. */
1185 loop_movables_free (movables);
1187 free (regs->array);
1188 regs->array = 0;
1189 regs->num = 0;
1192 /* Add elements to *OUTPUT to record all the pseudo-regs
1193 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1195 void
1196 record_excess_regs (in_this, not_in_this, output)
1197 rtx in_this, not_in_this;
1198 rtx *output;
1200 enum rtx_code code;
1201 const char *fmt;
1202 int i;
1204 code = GET_CODE (in_this);
1206 switch (code)
1208 case PC:
1209 case CC0:
1210 case CONST_INT:
1211 case CONST_DOUBLE:
1212 case CONST:
1213 case SYMBOL_REF:
1214 case LABEL_REF:
1215 return;
1217 case REG:
1218 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1219 && ! reg_mentioned_p (in_this, not_in_this))
1220 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1221 return;
1223 default:
1224 break;
1227 fmt = GET_RTX_FORMAT (code);
1228 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1230 int j;
1232 switch (fmt[i])
1234 case 'E':
1235 for (j = 0; j < XVECLEN (in_this, i); j++)
1236 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1237 break;
1239 case 'e':
1240 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1241 break;
1246 /* Check what regs are referred to in the libcall block ending with INSN,
1247 aside from those mentioned in the equivalent value.
1248 If there are none, return 0.
1249 If there are one or more, return an EXPR_LIST containing all of them. */
1251 rtx
1252 libcall_other_reg (insn, equiv)
1253 rtx insn, equiv;
1255 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1256 rtx p = XEXP (note, 0);
1257 rtx output = 0;
1259 /* First, find all the regs used in the libcall block
1260 that are not mentioned as inputs to the result. */
1262 while (p != insn)
1264 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1265 || GET_CODE (p) == CALL_INSN)
1266 record_excess_regs (PATTERN (p), equiv, &output);
1267 p = NEXT_INSN (p);
1270 return output;
1273 /* Return 1 if all uses of REG
1274 are between INSN and the end of the basic block. */
1276 static int
1277 reg_in_basic_block_p (insn, reg)
1278 rtx insn, reg;
1280 int regno = REGNO (reg);
1281 rtx p;
1283 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1284 return 0;
1286 /* Search this basic block for the already recorded last use of the reg. */
1287 for (p = insn; p; p = NEXT_INSN (p))
1289 switch (GET_CODE (p))
1291 case NOTE:
1292 break;
1294 case INSN:
1295 case CALL_INSN:
1296 /* Ordinary insn: if this is the last use, we win. */
1297 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1298 return 1;
1299 break;
1301 case JUMP_INSN:
1302 /* Jump insn: if this is the last use, we win. */
1303 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1304 return 1;
1305 /* Otherwise, it's the end of the basic block, so we lose. */
1306 return 0;
1308 case CODE_LABEL:
1309 case BARRIER:
1310 /* It's the end of the basic block, so we lose. */
1311 return 0;
1313 default:
1314 break;
1318 /* The "last use" that was recorded can't be found after the first
1319 use. This can happen when the last use was deleted while
1320 processing an inner loop, this inner loop was then completely
1321 unrolled, and the outer loop is always exited after the inner loop,
1322 so that everything after the first use becomes a single basic block. */
1323 return 1;
1326 /* Compute the benefit of eliminating the insns in the block whose
1327 last insn is LAST. This may be a group of insns used to compute a
1328 value directly or can contain a library call. */
1330 static int
1331 libcall_benefit (last)
1332 rtx last;
1334 rtx insn;
1335 int benefit = 0;
1337 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1338 insn != last; insn = NEXT_INSN (insn))
1340 if (GET_CODE (insn) == CALL_INSN)
1341 benefit += 10; /* Assume at least this many insns in a library
1342 routine. */
1343 else if (GET_CODE (insn) == INSN
1344 && GET_CODE (PATTERN (insn)) != USE
1345 && GET_CODE (PATTERN (insn)) != CLOBBER)
1346 benefit++;
1349 return benefit;
1352 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1354 static rtx
1355 skip_consec_insns (insn, count)
1356 rtx insn;
1357 int count;
1359 for (; count > 0; count--)
1361 rtx temp;
1363 /* If first insn of libcall sequence, skip to end. */
1364 /* Do this at start of loop, since INSN is guaranteed to
1365 be an insn here. */
1366 if (GET_CODE (insn) != NOTE
1367 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1368 insn = XEXP (temp, 0);
1370 do
1371 insn = NEXT_INSN (insn);
1372 while (GET_CODE (insn) == NOTE);
1375 return insn;
1378 /* Ignore any movable whose insn falls within a libcall
1379 which is part of another movable.
1380 We make use of the fact that the movable for the libcall value
1381 was made later and so appears later on the chain. */
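/* A libcall block is delimited by a matching pair of notes (a sketch of
   the general shape, not any particular insn sequence):

       insn_1   carries a REG_LIBCALL note pointing at insn_n
       ...      insns computing the arguments and result
       insn_n   carries a REG_RETVAL note pointing back at insn_1

   so XEXP (note, 0) below is insn_1, and walking from it to m->insn
   visits every insn inside the block. */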
1383 static void
1384 ignore_some_movables (movables)
1385 struct loop_movables *movables;
1387 struct movable *m, *m1;
1389 for (m = movables->head; m; m = m->next)
1391 /* Is this a movable for the value of a libcall? */
1392 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1393 if (note)
1395 rtx insn;
1396 /* Check for earlier movables inside that range,
1397 and mark them invalid. We cannot use LUIDs here because
1398 insns created by loop.c for prior loops don't have LUIDs.
1399 Rather than reject all such insns from movables, we just
1400 explicitly check each insn in the libcall (since invariant
1401 libcalls aren't that common). */
1402 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1403 for (m1 = movables->head; m1 != m; m1 = m1->next)
1404 if (m1->insn == insn)
1405 m1->done = 1;
1410 /* For each movable insn, see if the reg that it loads
1411 leads when it dies right into another conditionally movable insn.
1412 If so, record that the second insn "forces" the first one,
1413 since the second can be moved only if the first is. */
1415 static void
1416 force_movables (movables)
1417 struct loop_movables *movables;
1419 struct movable *m, *m1;
1421 for (m1 = movables->head; m1; m1 = m1->next)
1422 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1423 if (!m1->partial && !m1->done)
1425 int regno = m1->regno;
1426 for (m = m1->next; m; m = m->next)
1427 /* ??? Could this be a bug? What if CSE caused the
1428 register of M1 to be used after this insn?
1429 Since CSE does not update regno_last_uid,
1430 this insn M->insn might not be where it dies.
1431 But very likely this doesn't matter; what matters is
1432 that M's reg is computed from M1's reg. */
1433 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1434 && !m->done)
1435 break;
1436 if (m != 0 && m->set_src == m1->set_dest
1437 /* If m->consec, m->set_src isn't valid. */
1438 && m->consec == 0)
1439 m = 0;
1441 /* Increase the priority of moving the first insn
1442 since it permits the second to be moved as well. */
1443 if (m != 0)
1445 m->forces = m1;
1446 m1->lifetime += m->lifetime;
1447 m1->savings += m->savings;
1452 /* Find invariant expressions that are equal and can be combined into
1453 one register. */
1455 static void
1456 combine_movables (movables, regs)
1457 struct loop_movables *movables;
1458 struct loop_regs *regs;
1460 struct movable *m;
1461 char *matched_regs = (char *) xmalloc (regs->num);
1462 enum machine_mode mode;
1464 /* Regs that are set more than once are not allowed to match
1465 or be matched. I'm no longer sure why not. */
1466 /* Only pseudo registers are allowed to match or be matched,
1467 since move_movables does not validate the change. */
1468 /* Perhaps testing m->consec_sets would be more appropriate here? */
1470 for (m = movables->head; m; m = m->next)
1471 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1472 && m->regno >= FIRST_PSEUDO_REGISTER
1473 && !m->partial)
1475 struct movable *m1;
1476 int regno = m->regno;
1478 memset (matched_regs, 0, regs->num);
1479 matched_regs[regno] = 1;
1481 /* We want later insns to match the first one. Don't make the first
1482 one match any later ones. So start this loop at m->next. */
1483 for (m1 = m->next; m1; m1 = m1->next)
1484 if (m != m1 && m1->match == 0
1485 && regs->array[m1->regno].n_times_set == 1
1486 && m1->regno >= FIRST_PSEUDO_REGISTER
1487 /* A reg used outside the loop mustn't be eliminated. */
1488 && !m1->global
1489 /* A reg used for zero-extending mustn't be eliminated. */
1490 && !m1->partial
1491 && (matched_regs[m1->regno]
1494 /* Can combine regs with different modes loaded from the
1495 same constant only if the modes are the same or
1496 if both are integer modes with M wider or the same
1497 width as M1. The check for integer is redundant, but
1498 safe, since the only case of differing destination
1499 modes with equal sources is when both sources are
1500 VOIDmode, i.e., CONST_INT. */
1501 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1502 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1503 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1504 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1505 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1506 /* See if the source of M1 says it matches M. */
1507 && ((GET_CODE (m1->set_src) == REG
1508 && matched_regs[REGNO (m1->set_src)])
1509 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1510 movables, regs))))
1511 && ((m->dependencies == m1->dependencies)
1512 || rtx_equal_p (m->dependencies, m1->dependencies)))
1514 m->lifetime += m1->lifetime;
1515 m->savings += m1->savings;
1516 m1->done = 1;
1517 m1->match = m;
1518 matched_regs[m1->regno] = 1;
1522 /* Now combine the regs used for zero-extension.
1523 This can be done for those not marked `global'
1524 provided their lives don't overlap. */
1526 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1527 mode = GET_MODE_WIDER_MODE (mode))
1529 struct movable *m0 = 0;
1531 /* Combine all the registers for extension from mode MODE.
1532 Don't combine any that are used outside this loop. */
1533 for (m = movables->head; m; m = m->next)
1534 if (m->partial && ! m->global
1535 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1537 struct movable *m1;
1539 int first = REGNO_FIRST_LUID (m->regno);
1540 int last = REGNO_LAST_LUID (m->regno);
1542 if (m0 == 0)
1544 /* First one: don't check for overlap, just record it. */
1545 m0 = m;
1546 continue;
1549 /* Make sure they extend to the same mode.
1550 (Almost always true.) */
1551 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1552 continue;
1554 /* We already have one: check for overlap with those
1555 already combined together. */
1556 for (m1 = movables->head; m1 != m; m1 = m1->next)
1557 if (m1 == m0 || (m1->partial && m1->match == m0))
1558 if (! (REGNO_FIRST_LUID (m1->regno) > last
1559 || REGNO_LAST_LUID (m1->regno) < first))
1560 goto overlap;
1562 /* No overlap: we can combine this with the others. */
1563 m0->lifetime += m->lifetime;
1564 m0->savings += m->savings;
1565 m->done = 1;
1566 m->match = m0;
1568 overlap:
1573 /* Clean up. */
1574 free (matched_regs);
1577 /* Returns the number of movable instructions in LOOP that were not
1578 moved outside the loop. */
1580 static int
1581 num_unmoved_movables (loop)
1582 const struct loop *loop;
1584 int num = 0;
1585 struct movable *m;
1587 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1588 if (!m->done)
1589 ++num;
1591 return num;
1595 /* Return 1 if regs X and Y will become the same if moved. */
1597 static int
1598 regs_match_p (x, y, movables)
1599 rtx x, y;
1600 struct loop_movables *movables;
1602 unsigned int xn = REGNO (x);
1603 unsigned int yn = REGNO (y);
1604 struct movable *mx, *my;
1606 for (mx = movables->head; mx; mx = mx->next)
1607 if (mx->regno == xn)
1608 break;
1610 for (my = movables->head; my; my = my->next)
1611 if (my->regno == yn)
1612 break;
1614 return (mx && my
1615 && ((mx->match == my->match && mx->match != 0)
1616 || mx->match == my
1617 || mx == my->match));
1620 /* Return 1 if X and Y are identical-looking rtx's.
1621 This is the Lisp function EQUAL for rtx arguments.
1623 If two registers are matching movables or a movable register and an
1624 equivalent constant, consider them equal. */
1626 static int
1627 rtx_equal_for_loop_p (x, y, movables, regs)
1628 rtx x, y;
1629 struct loop_movables *movables;
1630 struct loop_regs *regs;
1632 int i;
1633 int j;
1634 struct movable *m;
1635 enum rtx_code code;
1636 const char *fmt;
1638 if (x == y)
1639 return 1;
1640 if (x == 0 || y == 0)
1641 return 0;
1643 code = GET_CODE (x);
1645 /* If we have a register and a constant, they may sometimes be
1646 equal. */
1647 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1648 && CONSTANT_P (y))
1650 for (m = movables->head; m; m = m->next)
1651 if (m->move_insn && m->regno == REGNO (x)
1652 && rtx_equal_p (m->set_src, y))
1653 return 1;
1655 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1656 && CONSTANT_P (x))
1658 for (m = movables->head; m; m = m->next)
1659 if (m->move_insn && m->regno == REGNO (y)
1660 && rtx_equal_p (m->set_src, x))
1661 return 1;
1664 /* Otherwise, rtx's of different codes cannot be equal. */
1665 if (code != GET_CODE (y))
1666 return 0;
1668 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1669 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1671 if (GET_MODE (x) != GET_MODE (y))
1672 return 0;
1674 /* These three types of rtx's can be compared nonrecursively. */
1675 if (code == REG)
1676 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1678 if (code == LABEL_REF)
1679 return XEXP (x, 0) == XEXP (y, 0);
1680 if (code == SYMBOL_REF)
1681 return XSTR (x, 0) == XSTR (y, 0);
1683 /* Compare the elements. If any pair of corresponding elements
1684 fails to match, return 0 for the whole thing. */
1686 fmt = GET_RTX_FORMAT (code);
1687 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1689 switch (fmt[i])
1691 case 'w':
1692 if (XWINT (x, i) != XWINT (y, i))
1693 return 0;
1694 break;
1696 case 'i':
1697 if (XINT (x, i) != XINT (y, i))
1698 return 0;
1699 break;
1701 case 'E':
1702 /* Two vectors must have the same length. */
1703 if (XVECLEN (x, i) != XVECLEN (y, i))
1704 return 0;
1706 /* And the corresponding elements must match. */
1707 for (j = 0; j < XVECLEN (x, i); j++)
1708 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1709 movables, regs) == 0)
1710 return 0;
1711 break;
1713 case 'e':
1714 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1715 == 0)
1716 return 0;
1717 break;
1719 case 's':
1720 if (strcmp (XSTR (x, i), XSTR (y, i)))
1721 return 0;
1722 break;
1724 case 'u':
1725 /* These are just backpointers, so they don't matter. */
1726 break;
1728 case '0':
1729 break;
1731 /* It is believed that rtx's at this level will never
1732 contain anything but integers and other rtx's,
1733 except for within LABEL_REFs and SYMBOL_REFs. */
1734 default:
1735 abort ();
1738 return 1;
1741 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1742 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1743 references is incremented once for each added note. */
1745 static void
1746 add_label_notes (x, insns)
1747 rtx x;
1748 rtx insns;
1750 enum rtx_code code = GET_CODE (x);
1751 int i, j;
1752 const char *fmt;
1753 rtx insn;
1755 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1757 /* This code used to ignore labels that referred to dispatch tables to
1758 avoid flow generating (slightly) worse code.
1760 We no longer ignore such label references (see LABEL_REF handling in
1761 mark_jump_label for additional information). */
1762 for (insn = insns; insn; insn = NEXT_INSN (insn))
1763 if (reg_mentioned_p (XEXP (x, 0), insn))
1765 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1766 REG_NOTES (insn));
1767 if (LABEL_P (XEXP (x, 0)))
1768 LABEL_NUSES (XEXP (x, 0))++;
1772 fmt = GET_RTX_FORMAT (code);
1773 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1775 if (fmt[i] == 'e')
1776 add_label_notes (XEXP (x, i), insns);
1777 else if (fmt[i] == 'E')
1778 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1779 add_label_notes (XVECEXP (x, i, j), insns);
1783 /* Scan MOVABLES, and move the insns that deserve to be moved.
1784 If two matching movables are combined, replace one reg with the
1785 other throughout. */
1787 static void
1788 move_movables (loop, movables, threshold, insn_count)
1789 struct loop *loop;
1790 struct loop_movables *movables;
1791 int threshold;
1792 int insn_count;
1794 struct loop_regs *regs = LOOP_REGS (loop);
1795 int nregs = regs->num;
1796 rtx new_start = 0;
1797 struct movable *m;
1798 rtx p;
1799 rtx loop_start = loop->start;
1800 rtx loop_end = loop->end;
1801 /* Map of pseudo-register replacements to handle combining
1802 when we move several insns that load the same value
1803 into different pseudo-registers. */
1804 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1805 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1807 for (m = movables->head; m; m = m->next)
1809 /* Describe this movable insn. */
1811 if (loop_dump_stream)
1813 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1814 INSN_UID (m->insn), m->regno, m->lifetime);
1815 if (m->consec > 0)
1816 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1817 if (m->cond)
1818 fprintf (loop_dump_stream, "cond ");
1819 if (m->force)
1820 fprintf (loop_dump_stream, "force ");
1821 if (m->global)
1822 fprintf (loop_dump_stream, "global ");
1823 if (m->done)
1824 fprintf (loop_dump_stream, "done ");
1825 if (m->move_insn)
1826 fprintf (loop_dump_stream, "move-insn ");
1827 if (m->match)
1828 fprintf (loop_dump_stream, "matches %d ",
1829 INSN_UID (m->match->insn));
1830 if (m->forces)
1831 fprintf (loop_dump_stream, "forces %d ",
1832 INSN_UID (m->forces->insn));
1835 /* Ignore the insn if it's already done (it matched something else).
1836 Otherwise, see if it is now safe to move. */
1838 if (!m->done
1839 && (! m->cond
1840 || (1 == loop_invariant_p (loop, m->set_src)
1841 && (m->dependencies == 0
1842 || 1 == loop_invariant_p (loop, m->dependencies))
1843 && (m->consec == 0
1844 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1845 m->consec + 1,
1846 m->insn))))
1847 && (! m->forces || m->forces->done))
1849 int regno;
1850 rtx p;
1851 int savings = m->savings;
1853 /* We have an insn that is safe to move.
1854 Compute its desirability. */
1856 p = m->insn;
1857 regno = m->regno;
1859 if (loop_dump_stream)
1860 fprintf (loop_dump_stream, "savings %d ", savings);
1862 if (regs->array[regno].moved_once && loop_dump_stream)
1863 fprintf (loop_dump_stream, "halved since already moved ");
1865 /* An insn MUST be moved if we already moved something else
1866 which is safe only if this one is moved too: that is,
1867 if already_moved[REGNO] is nonzero. */
1869 /* An insn is desirable to move if the new lifetime of the
1870 register is no more than THRESHOLD times the old lifetime.
1871 If it's not desirable, it means the loop is so big
1872 that moving won't speed things up much,
1873 and it is liable to make register usage worse. */
1875 /* It is also desirable to move if it can be moved at no
1876 extra cost because something else was already moved. */
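/* Concretely: with threshold T, savings S and lifetime L, the insn
   is moved whenever T * S * L >= insn_count, or >= 2 * insn_count
   if this reg was already moved out of some other loop (the
   "halved since already moved" case reported in the dump above).  */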
1878 if (already_moved[regno]
1879 || flag_move_all_movables
1880 || (threshold * savings * m->lifetime) >=
1881 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1882 || (m->forces && m->forces->done
1883 && regs->array[m->forces->regno].n_times_set == 1))
1885 int count;
1886 struct movable *m1;
1887 rtx first = NULL_RTX;
1889 /* Now move the insns that set the reg. */
1891 if (m->partial && m->match)
1893 rtx newpat, i1;
1894 rtx r1, r2;
1895 /* Find the end of this chain of matching regs.
1896 Thus, we load each reg in the chain from that one reg.
1897 And that reg is loaded with 0 directly,
1898 since it has ->match == 0. */
1899 for (m1 = m; m1->match; m1 = m1->match);
1900 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1901 SET_DEST (PATTERN (m1->insn)));
1902 i1 = loop_insn_hoist (loop, newpat);
1904 /* Mark the moved, invariant reg as being allowed to
1905 share a hard reg with the other matching invariant. */
1906 REG_NOTES (i1) = REG_NOTES (m->insn);
1907 r1 = SET_DEST (PATTERN (m->insn));
1908 r2 = SET_DEST (PATTERN (m1->insn));
1909 regs_may_share
1910 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1911 gen_rtx_EXPR_LIST (VOIDmode, r2,
1912 regs_may_share));
1913 delete_insn (m->insn);
1915 if (new_start == 0)
1916 new_start = i1;
1918 if (loop_dump_stream)
1919 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1921 /* If we are to re-generate the item being moved with a
1922 new move insn, first delete what we have and then emit
1923 the move insn before the loop. */
1924 else if (m->move_insn)
1926 rtx i1, temp, seq;
1928 for (count = m->consec; count >= 0; count--)
1930 /* If this is the first insn of a library call sequence,
1931 something is very wrong. */
1932 if (GET_CODE (p) != NOTE
1933 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1934 abort ();
1936 /* If this is the last insn of a libcall sequence, then
1937 delete every insn in the sequence except the last.
1938 The last insn is handled in the normal manner. */
1939 if (GET_CODE (p) != NOTE
1940 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1942 temp = XEXP (temp, 0);
1943 while (temp != p)
1944 temp = delete_insn (temp);
1947 temp = p;
1948 p = delete_insn (p);
1950 /* simplify_giv_expr expects that it can walk the insns
1951 at m->insn forwards and see this old sequence we are
1952 tossing here. delete_insn does preserve the next
1953 pointers, but when we skip over a NOTE we must fix
1954 it up. Otherwise that code walks into the non-deleted
1955 insn stream. */
1956 while (p && GET_CODE (p) == NOTE)
1957 p = NEXT_INSN (temp) = NEXT_INSN (p);
1960 start_sequence ();
1961 emit_move_insn (m->set_dest, m->set_src);
1962 seq = get_insns ();
1963 end_sequence ();
1965 add_label_notes (m->set_src, seq);
1967 i1 = loop_insn_hoist (loop, seq);
1968 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1969 set_unique_reg_note (i1,
1970 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1971 m->set_src);
1973 if (loop_dump_stream)
1974 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1976 /* The more regs we move, the less we like moving them. */
1977 threshold -= 3;
1979 else
1981 for (count = m->consec; count >= 0; count--)
1983 rtx i1, temp;
1985 /* If first insn of libcall sequence, skip to end. */
1986 /* Do this at start of loop, since p is guaranteed to
1987 be an insn here. */
1988 if (GET_CODE (p) != NOTE
1989 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1990 p = XEXP (temp, 0);
1992 /* If last insn of libcall sequence, move all
1993 insns except the last before the loop. The last
1994 insn is handled in the normal manner. */
1995 if (GET_CODE (p) != NOTE
1996 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1998 rtx fn_address = 0;
1999 rtx fn_reg = 0;
2000 rtx fn_address_insn = 0;
2002 first = 0;
2003 for (temp = XEXP (temp, 0); temp != p;
2004 temp = NEXT_INSN (temp))
2006 rtx body;
2007 rtx n;
2008 rtx next;
2010 if (GET_CODE (temp) == NOTE)
2011 continue;
2013 body = PATTERN (temp);
2015 /* Find the next insn after TEMP,
2016 not counting USE or NOTE insns. */
2017 for (next = NEXT_INSN (temp); next != p;
2018 next = NEXT_INSN (next))
2019 if (! (GET_CODE (next) == INSN
2020 && GET_CODE (PATTERN (next)) == USE)
2021 && GET_CODE (next) != NOTE)
2022 break;
2024 /* If that is the call, this may be the insn
2025 that loads the function address.
2027 Extract the function address from the insn
2028 that loads it into a register.
2029 If this insn was cse'd, we get incorrect code.
2031 So emit a new move insn that copies the
2032 function address into the register that the
2033 call insn will use. flow.c will delete any
2034 redundant stores that we have created. */
2035 if (GET_CODE (next) == CALL_INSN
2036 && GET_CODE (body) == SET
2037 && GET_CODE (SET_DEST (body)) == REG
2038 && (n = find_reg_note (temp, REG_EQUAL,
2039 NULL_RTX)))
2041 fn_reg = SET_SRC (body);
2042 if (GET_CODE (fn_reg) != REG)
2043 fn_reg = SET_DEST (body);
2044 fn_address = XEXP (n, 0);
2045 fn_address_insn = temp;
2047 /* We have the call insn.
2048 If it uses the register we suspect it might,
2049 load it with the correct address directly. */
2050 if (GET_CODE (temp) == CALL_INSN
2051 && fn_address != 0
2052 && reg_referenced_p (fn_reg, body))
2053 loop_insn_emit_after (loop, 0, fn_address_insn,
2054 gen_move_insn
2055 (fn_reg, fn_address));
2057 if (GET_CODE (temp) == CALL_INSN)
2059 i1 = loop_call_insn_hoist (loop, body);
2060 /* Because the USAGE information potentially
2061 contains objects other than hard registers
2062 we need to copy it. */
2063 if (CALL_INSN_FUNCTION_USAGE (temp))
2064 CALL_INSN_FUNCTION_USAGE (i1)
2065 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2067 else
2068 i1 = loop_insn_hoist (loop, body);
2069 if (first == 0)
2070 first = i1;
2071 if (temp == fn_address_insn)
2072 fn_address_insn = i1;
2073 REG_NOTES (i1) = REG_NOTES (temp);
2074 REG_NOTES (temp) = NULL;
2075 delete_insn (temp);
2077 if (new_start == 0)
2078 new_start = first;
2080 if (m->savemode != VOIDmode)
2082 /* P sets REG to zero; but we should clear only
2083 the bits that are not covered by the mode
2084 m->savemode. */
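/* For example, if m->savemode is QImode, the mask computed below
   is (1 << 8) - 1 = 0xff, so the AND clears every bit above the
   low byte while leaving the byte itself untouched.  */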
2085 rtx reg = m->set_dest;
2086 rtx sequence;
2087 rtx tem;
2089 start_sequence ();
2090 tem = expand_simple_binop
2091 (GET_MODE (reg), AND, reg,
2092 GEN_INT ((((HOST_WIDE_INT) 1
2093 << GET_MODE_BITSIZE (m->savemode)))
2094 - 1),
2095 reg, 1, OPTAB_LIB_WIDEN);
2096 if (tem == 0)
2097 abort ();
2098 if (tem != reg)
2099 emit_move_insn (reg, tem);
2100 sequence = get_insns ();
2101 end_sequence ();
2102 i1 = loop_insn_hoist (loop, sequence);
2104 else if (GET_CODE (p) == CALL_INSN)
2106 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2107 /* Because the USAGE information potentially
2108 contains objects other than hard registers
2109 we need to copy it. */
2110 if (CALL_INSN_FUNCTION_USAGE (p))
2111 CALL_INSN_FUNCTION_USAGE (i1)
2112 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2114 else if (count == m->consec && m->move_insn_first)
2116 rtx seq;
2117 /* The SET_SRC might not be invariant, so we must
2118 use the REG_EQUAL note. */
2119 start_sequence ();
2120 emit_move_insn (m->set_dest, m->set_src);
2121 seq = get_insns ();
2122 end_sequence ();
2124 add_label_notes (m->set_src, seq);
2126 i1 = loop_insn_hoist (loop, seq);
2127 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2128 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2129 : REG_EQUAL, m->set_src);
2131 else
2132 i1 = loop_insn_hoist (loop, PATTERN (p));
2134 if (REG_NOTES (i1) == 0)
2136 REG_NOTES (i1) = REG_NOTES (p);
2137 REG_NOTES (p) = NULL;
2139 /* If there is a REG_EQUAL note present whose value
2140 is not loop invariant, then delete it, since it
2141 may cause problems with later optimization passes.
2142 It is possible for cse to create such notes
2143 like this as a result of record_jump_cond. */
2145 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2146 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2147 remove_note (i1, temp);
2150 if (new_start == 0)
2151 new_start = i1;
2153 if (loop_dump_stream)
2154 fprintf (loop_dump_stream, " moved to %d",
2155 INSN_UID (i1));
2157 /* If library call, now fix the REG_NOTES that contain
2158 insn pointers, namely REG_LIBCALL on FIRST
2159 and REG_RETVAL on I1. */
2160 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2162 XEXP (temp, 0) = first;
2163 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2164 XEXP (temp, 0) = i1;
2167 temp = p;
2168 delete_insn (p);
2169 p = NEXT_INSN (p);
2171 /* simplify_giv_expr expects that it can walk the insns
2172 at m->insn forwards and see this old sequence we are
2173 tossing here. delete_insn does preserve the next
2174 pointers, but when we skip over a NOTE we must fix
2175 it up. Otherwise that code walks into the non-deleted
2176 insn stream. */
2177 while (p && GET_CODE (p) == NOTE)
2178 p = NEXT_INSN (temp) = NEXT_INSN (p);
2181 /* The more regs we move, the less we like moving them. */
2182 threshold -= 3;
2185 /* Any other movable that loads the same register
2186 MUST be moved. */
2187 already_moved[regno] = 1;
2189 /* This reg has been moved out of one loop. */
2190 regs->array[regno].moved_once = 1;
2192 /* The reg set here is now invariant. */
2193 if (! m->partial)
2195 int i;
2196 for (i = 0; i < (int) LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2197 regs->array[regno+i].set_in_loop = 0;
2200 m->done = 1;
2202 /* Change the length-of-life info for the register
2203 to say it lives at least the full length of this loop.
2204 This will help guide optimizations in outer loops. */
2206 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2207 /* This is the old insn before all the moved insns.
2208 We can't use the moved insn because it is out of range
2209 in uid_luid. Only the old insns have luids. */
2210 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2211 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2212 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2214 /* Combine with this moved insn any other matching movables. */
2216 if (! m->partial)
2217 for (m1 = movables->head; m1; m1 = m1->next)
2218 if (m1->match == m)
2220 rtx temp;
2222 /* Schedule the reg loaded by M1
2223 for replacement so that it shares the reg of M.
2224 If the modes differ (only possible in restricted
2225 circumstances), make a SUBREG.
2227 Note this assumes that the target dependent files
2228 treat REG and SUBREG equally, including within
2229 GO_IF_LEGITIMATE_ADDRESS and in all the
2230 predicates since we never verify that replacing the
2231 original register with a SUBREG results in a
2232 recognizable insn. */
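/* E.g. substituting the low part of a DImode pseudo for an SImode
   one produces (subreg:SI (reg:DI n) 0) on a little-endian target;
   gen_lowpart_common chooses the offset appropriate to the
   target's endianness.  */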
2233 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2234 reg_map[m1->regno] = m->set_dest;
2235 else
2236 reg_map[m1->regno]
2237 = gen_lowpart_common (GET_MODE (m1->set_dest),
2238 m->set_dest);
2240 /* Get rid of the matching insn
2241 and prevent further processing of it. */
2242 m1->done = 1;
2244 /* If library call, delete all insns. */
2245 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2246 NULL_RTX)))
2247 delete_insn_chain (XEXP (temp, 0), m1->insn);
2248 else
2249 delete_insn (m1->insn);
2251 /* Any other movable that loads the same register
2252 MUST be moved. */
2253 already_moved[m1->regno] = 1;
2255 /* The reg merged here is now invariant,
2256 if the reg it matches is invariant. */
2257 if (! m->partial)
2259 int i;
2260 for (i = 0;
2261 i < (int) LOOP_REGNO_NREGS (regno, m1->set_dest);
2262 i++)
2263 regs->array[m1->regno+i].set_in_loop = 0;
2267 else if (loop_dump_stream)
2268 fprintf (loop_dump_stream, "not desirable");
2270 else if (loop_dump_stream && !m->match)
2271 fprintf (loop_dump_stream, "not safe");
2273 if (loop_dump_stream)
2274 fprintf (loop_dump_stream, "\n");
2277 if (new_start == 0)
2278 new_start = loop_start;
2280 /* Go through all the instructions in the loop, making
2281 all the register substitutions scheduled in REG_MAP. */
2282 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2283 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2284 || GET_CODE (p) == CALL_INSN)
2286 replace_regs (PATTERN (p), reg_map, nregs, 0);
2287 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2288 INSN_CODE (p) = -1;
2291 /* Clean up. */
2292 free (reg_map);
2293 free (already_moved);
2297 static void
2298 loop_movables_add (movables, m)
2299 struct loop_movables *movables;
2300 struct movable *m;
2302 if (movables->head == 0)
2303 movables->head = m;
2304 else
2305 movables->last->next = m;
2306 movables->last = m;
2310 static void
2311 loop_movables_free (movables)
2312 struct loop_movables *movables;
2314 struct movable *m;
2315 struct movable *m_next;
2317 for (m = movables->head; m; m = m_next)
2319 m_next = m->next;
2320 free (m);
2324 #if 0
2325 /* Scan X and replace the address of any MEM in it with ADDR.
2326 REG is the address that MEM should have before the replacement. */
2328 static void
2329 replace_call_address (x, reg, addr)
2330 rtx x, reg, addr;
2332 enum rtx_code code;
2333 int i;
2334 const char *fmt;
2336 if (x == 0)
2337 return;
2338 code = GET_CODE (x);
2339 switch (code)
2341 case PC:
2342 case CC0:
2343 case CONST_INT:
2344 case CONST_DOUBLE:
2345 case CONST:
2346 case SYMBOL_REF:
2347 case LABEL_REF:
2348 case REG:
2349 return;
2351 case SET:
2352 /* Short cut for very common case. */
2353 replace_call_address (XEXP (x, 1), reg, addr);
2354 return;
2356 case CALL:
2357 /* Short cut for very common case. */
2358 replace_call_address (XEXP (x, 0), reg, addr);
2359 return;
2361 case MEM:
2362 /* If this MEM uses a reg other than the one we expected,
2363 something is wrong. */
2364 if (XEXP (x, 0) != reg)
2365 abort ();
2366 XEXP (x, 0) = addr;
2367 return;
2369 default:
2370 break;
2373 fmt = GET_RTX_FORMAT (code);
2374 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2376 if (fmt[i] == 'e')
2377 replace_call_address (XEXP (x, i), reg, addr);
2378 else if (fmt[i] == 'E')
2380 int j;
2381 for (j = 0; j < XVECLEN (x, i); j++)
2382 replace_call_address (XVECEXP (x, i, j), reg, addr);
2386 #endif
2388 /* Return the number of memory refs to addresses that vary
2389 in the rtx X. */
2391 static int
2392 count_nonfixed_reads (loop, x)
2393 const struct loop *loop;
2394 rtx x;
2396 enum rtx_code code;
2397 int i;
2398 const char *fmt;
2399 int value;
2401 if (x == 0)
2402 return 0;
2404 code = GET_CODE (x);
2405 switch (code)
2407 case PC:
2408 case CC0:
2409 case CONST_INT:
2410 case CONST_DOUBLE:
2411 case CONST:
2412 case SYMBOL_REF:
2413 case LABEL_REF:
2414 case REG:
2415 return 0;
2417 case MEM:
2418 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2419 + count_nonfixed_reads (loop, XEXP (x, 0)));
2421 default:
2422 break;
2425 value = 0;
2426 fmt = GET_RTX_FORMAT (code);
2427 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2429 if (fmt[i] == 'e')
2430 value += count_nonfixed_reads (loop, XEXP (x, i));
2431 if (fmt[i] == 'E')
2433 int j;
2434 for (j = 0; j < XVECLEN (x, i); j++)
2435 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2438 return value;
2441 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2442 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2443 `unknown_address_altered', `unknown_constant_address_altered', and
2444 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2445 list `store_mems' in LOOP. */
2447 static void
2448 prescan_loop (loop)
2449 struct loop *loop;
2451 int level = 1;
2452 rtx insn;
2453 struct loop_info *loop_info = LOOP_INFO (loop);
2454 rtx start = loop->start;
2455 rtx end = loop->end;
2456 /* The label after END. Jumping here is just like falling off the
2457 end of the loop. We use next_nonnote_insn instead of next_label
2458 as a hedge against the (pathological) case where some actual insn
2459 might end up between the two. */
2460 rtx exit_target = next_nonnote_insn (end);
2462 loop_info->has_indirect_jump = indirect_jump_in_function;
2463 loop_info->pre_header_has_call = 0;
2464 loop_info->has_call = 0;
2465 loop_info->has_nonconst_call = 0;
2466 loop_info->has_prefetch = 0;
2467 loop_info->has_volatile = 0;
2468 loop_info->has_tablejump = 0;
2469 loop_info->has_multiple_exit_targets = 0;
2470 loop->level = 1;
2472 loop_info->unknown_address_altered = 0;
2473 loop_info->unknown_constant_address_altered = 0;
2474 loop_info->store_mems = NULL_RTX;
2475 loop_info->first_loop_store_insn = NULL_RTX;
2476 loop_info->mems_idx = 0;
2477 loop_info->num_mem_sets = 0;
2480 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2481 insn = PREV_INSN (insn))
2483 if (GET_CODE (insn) == CALL_INSN)
2485 loop_info->pre_header_has_call = 1;
2486 break;
2490 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2491 insn = NEXT_INSN (insn))
2493 switch (GET_CODE (insn))
2495 case NOTE:
2496 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2498 ++level;
2499 /* Count number of loops contained in this one. */
2500 loop->level++;
2502 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2503 --level;
2504 break;
2506 case CALL_INSN:
2507 if (! CONST_OR_PURE_CALL_P (insn))
2509 loop_info->unknown_address_altered = 1;
2510 loop_info->has_nonconst_call = 1;
2512 else if (pure_call_p (insn))
2513 loop_info->has_nonconst_call = 1;
2514 loop_info->has_call = 1;
2515 if (can_throw_internal (insn))
2516 loop_info->has_multiple_exit_targets = 1;
2517 break;
2519 case JUMP_INSN:
2520 if (! loop_info->has_multiple_exit_targets)
2522 rtx set = pc_set (insn);
2524 if (set)
2526 rtx src = SET_SRC (set);
2527 rtx label1, label2;
2529 if (GET_CODE (src) == IF_THEN_ELSE)
2531 label1 = XEXP (src, 1);
2532 label2 = XEXP (src, 2);
2534 else
2536 label1 = src;
2537 label2 = NULL_RTX;
2540 do
2542 if (label1 && label1 != pc_rtx)
2544 if (GET_CODE (label1) != LABEL_REF)
2546 /* Something tricky. */
2547 loop_info->has_multiple_exit_targets = 1;
2548 break;
2550 else if (XEXP (label1, 0) != exit_target
2551 && LABEL_OUTSIDE_LOOP_P (label1))
2553 /* A jump outside the current loop. */
2554 loop_info->has_multiple_exit_targets = 1;
2555 break;
2559 label1 = label2;
2560 label2 = NULL_RTX;
2562 while (label1);
2564 else
2566 /* A return, or something tricky. */
2567 loop_info->has_multiple_exit_targets = 1;
2570 /* FALLTHRU */
2572 case INSN:
2573 if (volatile_refs_p (PATTERN (insn)))
2574 loop_info->has_volatile = 1;
2576 if (GET_CODE (insn) == JUMP_INSN
2577 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2578 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2579 loop_info->has_tablejump = 1;
2581 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2582 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2583 loop_info->first_loop_store_insn = insn;
2585 if (flag_non_call_exceptions && can_throw_internal (insn))
2586 loop_info->has_multiple_exit_targets = 1;
2587 break;
2589 default:
2590 break;
2594 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2595 if (/* An exception thrown by a called function might land us
2596 anywhere. */
2597 ! loop_info->has_nonconst_call
2598 /* We don't want loads for MEMs moved to a location before the
2599 one at which their stack memory becomes allocated. (Note
2600 that this is not a problem for malloc, etc., since those
2601 require actual function calls.) */
2602 && ! current_function_calls_alloca
2603 /* There are ways to leave the loop other than falling off the
2604 end. */
2605 && ! loop_info->has_multiple_exit_targets)
2606 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2607 insn = NEXT_INSN (insn))
2608 for_each_rtx (&insn, insert_loop_mem, loop_info);
2610 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2611 that loop_invariant_p and load_mems can use true_dependence
2612 to determine what is really clobbered. */
2613 if (loop_info->unknown_address_altered)
2615 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2617 loop_info->store_mems
2618 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2620 if (loop_info->unknown_constant_address_altered)
2622 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2624 RTX_UNCHANGING_P (mem) = 1;
2625 loop_info->store_mems
2626 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2630 /* Invalidate all loops containing LABEL. */
2632 static void
2633 invalidate_loops_containing_label (label)
2634 rtx label;
2636 struct loop *loop;
2637 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2638 loop->invalid = 1;
2641 /* Scan the function looking for loops. Record the start and end of each loop.
2642 Also mark as invalid any loops that contain a setjmp or are branched
2643 to from outside the loop. */
2645 static void
2646 find_and_verify_loops (f, loops)
2647 rtx f;
2648 struct loops *loops;
2650 rtx insn;
2651 rtx label;
2652 int num_loops;
2653 struct loop *current_loop;
2654 struct loop *next_loop;
2655 struct loop *loop;
2657 num_loops = loops->num;
2659 compute_luids (f, NULL_RTX, 0);
2661 /* If there are jumps to undefined labels,
2662 treat them as jumps out of any/all loops.
2663 This also avoids writing past the end of the tables when there are no loops. */
2664 uid_loop[0] = NULL;
2666 /* Find boundaries of loops, mark which loops are contained within
2667 loops, and invalidate loops that have setjmp. */
2669 num_loops = 0;
2670 current_loop = NULL;
2671 for (insn = f; insn; insn = NEXT_INSN (insn))
2673 if (GET_CODE (insn) == NOTE)
2674 switch (NOTE_LINE_NUMBER (insn))
2676 case NOTE_INSN_LOOP_BEG:
2677 next_loop = loops->array + num_loops;
2678 next_loop->num = num_loops;
2679 num_loops++;
2680 next_loop->start = insn;
2681 next_loop->outer = current_loop;
2682 current_loop = next_loop;
2683 break;
2685 case NOTE_INSN_LOOP_CONT:
2686 current_loop->cont = insn;
2687 break;
2689 case NOTE_INSN_LOOP_VTOP:
2690 current_loop->vtop = insn;
2691 break;
2693 case NOTE_INSN_LOOP_END:
2694 if (! current_loop)
2695 abort ();
2697 current_loop->end = insn;
2698 current_loop = current_loop->outer;
2699 break;
2701 default:
2702 break;
2705 if (GET_CODE (insn) == CALL_INSN
2706 && find_reg_note (insn, REG_SETJMP, NULL))
2708 /* In this case, we must invalidate our current loop and any
2709 enclosing loop. */
2710 for (loop = current_loop; loop; loop = loop->outer)
2712 loop->invalid = 1;
2713 if (loop_dump_stream)
2714 fprintf (loop_dump_stream,
2715 "\nLoop at %d ignored due to setjmp.\n",
2716 INSN_UID (loop->start));
2720 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2721 enclosing loop, but this doesn't matter. */
2722 uid_loop[INSN_UID (insn)] = current_loop;
2725 /* Any loop containing a label used in an initializer must be invalidated,
2726 because it can be jumped into from anywhere. */
2727 for (label = forced_labels; label; label = XEXP (label, 1))
2728 invalidate_loops_containing_label (XEXP (label, 0));
2730 /* Any loop containing a label used for an exception handler must be
2731 invalidated, because it can be jumped into from anywhere. */
2732 for_each_eh_label (invalidate_loops_containing_label);
2734 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2735 loop that it is not contained within, that loop is marked invalid.
2736 If any INSN or CALL_INSN uses a label's address, then the loop containing
2737 that label is marked invalid, because it could be jumped into from
2738 anywhere.
2740 Also look for blocks of code ending in an unconditional branch that
2741 exits the loop. If such a block is surrounded by a conditional
2742 branch around the block, move the block elsewhere (see below) and
2743 invert the jump to point to the code block. This may eliminate a
2744 label in our loop and will simplify processing by both us and a
2745 possible second cse pass. */
2747 for (insn = f; insn; insn = NEXT_INSN (insn))
2748 if (INSN_P (insn))
2750 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2752 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2754 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2755 if (note)
2756 invalidate_loops_containing_label (XEXP (note, 0));
2759 if (GET_CODE (insn) != JUMP_INSN)
2760 continue;
2762 mark_loop_jump (PATTERN (insn), this_loop);
2764 /* See if this is an unconditional branch outside the loop. */
2765 if (this_loop
2766 && (GET_CODE (PATTERN (insn)) == RETURN
2767 || (any_uncondjump_p (insn)
2768 && onlyjump_p (insn)
2769 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2770 != this_loop)))
2771 && get_max_uid () < max_uid_for_loop)
2773 rtx p;
2774 rtx our_next = next_real_insn (insn);
2775 rtx last_insn_to_move = NEXT_INSN (insn);
2776 struct loop *dest_loop;
2777 struct loop *outer_loop = NULL;
2779 /* Go backwards until we reach the start of the loop, a label,
2780 or a JUMP_INSN. */
2781 for (p = PREV_INSN (insn);
2782 GET_CODE (p) != CODE_LABEL
2783 && ! (GET_CODE (p) == NOTE
2784 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2785 && GET_CODE (p) != JUMP_INSN;
2786 p = PREV_INSN (p))
2787 ;
2789 /* Check for the case where we have a jump to an inner nested
2790 loop, and do not perform the optimization in that case. */
2792 if (JUMP_LABEL (insn))
2794 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2795 if (dest_loop)
2797 for (outer_loop = dest_loop; outer_loop;
2798 outer_loop = outer_loop->outer)
2799 if (outer_loop == this_loop)
2800 break;
2804 /* Make sure that the target of P is within the current loop. */
2806 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2807 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2808 outer_loop = this_loop;
2810 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2811 we have a block of code to try to move.
2813 We look backward and then forward from the target of INSN
2814 to find a BARRIER at the same loop depth as the target.
2815 If we find such a BARRIER, we make a new label for the start
2816 of the block, invert the jump in P and point it to that label,
2817 and move the block of code to the spot we found. */
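/* Schematically, a conditional jump P around an exit jump INSN

       P:      if (cond) goto L_cont
       INSN:   goto L_out
       L_cont: ...

   becomes, after inverting P and moving the block,

       P:      if (! cond) goto L_new
       L_cont: ...
       ...
       L_new:  goto L_out

   (an illustrative sketch only; invert_jump and reorder_insns
   below do the real work).  */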
2819 if (! outer_loop
2820 && GET_CODE (p) == JUMP_INSN
2821 && JUMP_LABEL (p) != 0
2822 /* Just ignore jumps to labels that were never emitted.
2823 These always indicate compilation errors. */
2824 && INSN_UID (JUMP_LABEL (p)) != 0
2825 && any_condjump_p (p) && onlyjump_p (p)
2826 && next_real_insn (JUMP_LABEL (p)) == our_next
2827 /* If it's not safe to move the sequence, then we
2828 mustn't try. */
2829 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2830 &last_insn_to_move))
2832 rtx target
2833 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2834 struct loop *target_loop = uid_loop[INSN_UID (target)];
2835 rtx loc, loc2;
2836 rtx tmp;
2838 /* Search for possible garbage past the conditional jumps
2839 and look for the last barrier. */
2840 for (tmp = last_insn_to_move;
2841 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2842 if (GET_CODE (tmp) == BARRIER)
2843 last_insn_to_move = tmp;
2845 for (loc = target; loc; loc = PREV_INSN (loc))
2846 if (GET_CODE (loc) == BARRIER
2847 /* Don't move things inside a tablejump. */
2848 && ((loc2 = next_nonnote_insn (loc)) == 0
2849 || GET_CODE (loc2) != CODE_LABEL
2850 || (loc2 = next_nonnote_insn (loc2)) == 0
2851 || GET_CODE (loc2) != JUMP_INSN
2852 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2853 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2854 && uid_loop[INSN_UID (loc)] == target_loop)
2855 break;
2857 if (loc == 0)
2858 for (loc = target; loc; loc = NEXT_INSN (loc))
2859 if (GET_CODE (loc) == BARRIER
2860 /* Don't move things inside a tablejump. */
2861 && ((loc2 = next_nonnote_insn (loc)) == 0
2862 || GET_CODE (loc2) != CODE_LABEL
2863 || (loc2 = next_nonnote_insn (loc2)) == 0
2864 || GET_CODE (loc2) != JUMP_INSN
2865 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2866 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2867 && uid_loop[INSN_UID (loc)] == target_loop)
2868 break;
2870 if (loc)
2872 rtx cond_label = JUMP_LABEL (p);
2873 rtx new_label = get_label_after (p);
2875 /* Ensure our label doesn't go away. */
2876 LABEL_NUSES (cond_label)++;
2878 /* Verify that uid_loop is large enough and that
2879 we can invert P. */
2880 if (invert_jump (p, new_label, 1))
2882 rtx q, r;
2884 /* If no suitable BARRIER was found, create a suitable
2885 one before TARGET. Since TARGET is a fall through
2886 path, we'll need to insert a jump around our block
2887 and add a BARRIER before TARGET.
2889 This creates an extra unconditional jump outside
2890 the loop. However, the benefits of removing rarely
2891 executed instructions from inside the loop usually
2892 outweigh the cost of the extra unconditional jump
2893 outside the loop. */
2894 if (loc == 0)
2896 rtx temp;
2898 temp = gen_jump (JUMP_LABEL (insn));
2899 temp = emit_jump_insn_before (temp, target);
2900 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2901 LABEL_NUSES (JUMP_LABEL (insn))++;
2902 loc = emit_barrier_before (target);
2905 /* Include the BARRIER after INSN and copy the
2906 block after LOC. */
2907 if (squeeze_notes (&new_label, &last_insn_to_move))
2908 abort ();
2909 reorder_insns (new_label, last_insn_to_move, loc);
2911 /* All those insns are now in TARGET_LOOP. */
2912 for (q = new_label;
2913 q != NEXT_INSN (last_insn_to_move);
2914 q = NEXT_INSN (q))
2915 uid_loop[INSN_UID (q)] = target_loop;
2917 /* The label jumped to by INSN is no longer a loop
2918 exit. Unless INSN does not have a label (e.g.,
2919 it is a RETURN insn), search loop->exit_labels
2920 to find its label_ref, and remove it. Also turn
2921 off LABEL_OUTSIDE_LOOP_P bit. */
2922 if (JUMP_LABEL (insn))
2924 for (q = 0, r = this_loop->exit_labels;
2925 r;
2926 q = r, r = LABEL_NEXTREF (r))
2927 if (XEXP (r, 0) == JUMP_LABEL (insn))
2929 LABEL_OUTSIDE_LOOP_P (r) = 0;
2930 if (q)
2931 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2932 else
2933 this_loop->exit_labels = LABEL_NEXTREF (r);
2934 break;
2937 for (loop = this_loop; loop && loop != target_loop;
2938 loop = loop->outer)
2939 loop->exit_count--;
2941 /* If we didn't find it, then something is
2942 wrong. */
2943 if (! r)
2944 abort ();
2947 /* P is now a jump outside the loop, so it must be put
2948 in loop->exit_labels, and marked as such.
2949 The easiest way to do this is to just call
2950 mark_loop_jump again for P. */
2951 mark_loop_jump (PATTERN (p), this_loop);
2953 /* If INSN now jumps to the insn after it,
2954 delete INSN. */
2955 if (JUMP_LABEL (insn) != 0
2956 && (next_real_insn (JUMP_LABEL (insn))
2957 == next_real_insn (insn)))
2958 delete_related_insns (insn);
2961 /* Continue the loop after where the conditional
2962 branch used to jump, since the only branch insn
2963 in the block (if it still remains) is an inter-loop
2964 branch and hence needs no processing. */
2965 insn = NEXT_INSN (cond_label);
2967 if (--LABEL_NUSES (cond_label) == 0)
2968 delete_related_insns (cond_label);
2970 /* This loop will be continued with NEXT_INSN (insn). */
2971 insn = PREV_INSN (insn);
2978 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2979 loops it is contained in, mark the target loop invalid.
2981 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2983 static void
2984 mark_loop_jump (x, loop)
2985 rtx x;
2986 struct loop *loop;
2988 struct loop *dest_loop;
2989 struct loop *outer_loop;
2990 int i;
2992 switch (GET_CODE (x))
2994 case PC:
2995 case USE:
2996 case CLOBBER:
2997 case REG:
2998 case MEM:
2999 case CONST_INT:
3000 case CONST_DOUBLE:
3001 case RETURN:
3002 return;
3004 case CONST:
3005 /* There could be a label reference in here. */
3006 mark_loop_jump (XEXP (x, 0), loop);
3007 return;
3009 case PLUS:
3010 case MINUS:
3011 case MULT:
3012 mark_loop_jump (XEXP (x, 0), loop);
3013 mark_loop_jump (XEXP (x, 1), loop);
3014 return;
3016 case LO_SUM:
3017 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3018 mark_loop_jump (XEXP (x, 1), loop);
3019 return;
3021 case SIGN_EXTEND:
3022 case ZERO_EXTEND:
3023 mark_loop_jump (XEXP (x, 0), loop);
3024 return;
3026 case LABEL_REF:
3027 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3029 /* Link together all labels that branch outside the loop. This
3030 is used by final_[bg]iv_value and the loop unrolling code. Also
3031 mark this LABEL_REF so we know that this branch should predict
3032 false. */
3034 /* A check to make sure the label is not in an inner nested loop,
3035 since this does not count as a loop exit. */
3036 if (dest_loop)
3038 for (outer_loop = dest_loop; outer_loop;
3039 outer_loop = outer_loop->outer)
3040 if (outer_loop == loop)
3041 break;
3043 else
3044 outer_loop = NULL;
3046 if (loop && ! outer_loop)
3048 LABEL_OUTSIDE_LOOP_P (x) = 1;
3049 LABEL_NEXTREF (x) = loop->exit_labels;
3050 loop->exit_labels = x;
3052 for (outer_loop = loop;
3053 outer_loop && outer_loop != dest_loop;
3054 outer_loop = outer_loop->outer)
3055 outer_loop->exit_count++;
3058 /* If this is inside a loop, but not in the current loop or one enclosed
3059 by it, it invalidates at least one loop. */
3061 if (! dest_loop)
3062 return;
3064 /* We must invalidate every nested loop containing the target of this
3065 label, except those that also contain the jump insn. */
3067 for (; dest_loop; dest_loop = dest_loop->outer)
3069 /* Stop when we reach a loop that also contains the jump insn. */
3070 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3071 if (dest_loop == outer_loop)
3072 return;
3074 /* If we get here, we know we need to invalidate a loop. */
3075 if (loop_dump_stream && ! dest_loop->invalid)
3076 fprintf (loop_dump_stream,
3077 "\nLoop at %d ignored due to multiple entry points.\n",
3078 INSN_UID (dest_loop->start));
3080 dest_loop->invalid = 1;
3082 return;
3084 case SET:
3085 /* If this is not setting pc, ignore. */
3086 if (SET_DEST (x) == pc_rtx)
3087 mark_loop_jump (SET_SRC (x), loop);
3088 return;
3090 case IF_THEN_ELSE:
3091 mark_loop_jump (XEXP (x, 1), loop);
3092 mark_loop_jump (XEXP (x, 2), loop);
3093 return;
3095 case PARALLEL:
3096 case ADDR_VEC:
3097 for (i = 0; i < XVECLEN (x, 0); i++)
3098 mark_loop_jump (XVECEXP (x, 0, i), loop);
3099 return;
3101 case ADDR_DIFF_VEC:
3102 for (i = 0; i < XVECLEN (x, 1); i++)
3103 mark_loop_jump (XVECEXP (x, 1, i), loop);
3104 return;
3106 default:
3107 /* Strictly speaking this is not a jump into the loop, only a possible
3108 jump out of the loop. However, we have no way to link the destination
3109 of this jump onto the list of exit labels. To be safe we mark this
3110 loop and any containing loops as invalid. */
3111 if (loop)
3113 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3115 if (loop_dump_stream && ! outer_loop->invalid)
3116 fprintf (loop_dump_stream,
3117 "\nLoop at %d ignored due to unknown exit jump.\n",
3118 INSN_UID (outer_loop->start));
3119 outer_loop->invalid = 1;
3122 return;
3126 /* Return nonzero if there is a label in the range from
3127 insn INSN to and including the insn whose luid is END.
3128 INSN must have an assigned luid (i.e., it must not have
3129 been previously created by loop.c). */
3131 static int
3132 labels_in_range_p (insn, end)
3133 rtx insn;
3134 int end;
3136 while (insn && INSN_LUID (insn) <= end)
3138 if (GET_CODE (insn) == CODE_LABEL)
3139 return 1;
3140 insn = NEXT_INSN (insn);
3143 return 0;
3146 /* Record that a memory reference X is being set. */
3148 static void
3149 note_addr_stored (x, y, data)
3150 rtx x;
3151 rtx y ATTRIBUTE_UNUSED;
3152 void *data ATTRIBUTE_UNUSED;
3154 struct loop_info *loop_info = data;
3156 if (x == 0 || GET_CODE (x) != MEM)
3157 return;
3159 /* Count number of memory writes.
3160 This affects heuristics in strength_reduce. */
3161 loop_info->num_mem_sets++;
3163 /* BLKmode MEM means all memory is clobbered. */
3164 if (GET_MODE (x) == BLKmode)
3166 if (RTX_UNCHANGING_P (x))
3167 loop_info->unknown_constant_address_altered = 1;
3168 else
3169 loop_info->unknown_address_altered = 1;
3171 return;
3174 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3175 loop_info->store_mems);
3178 /* X is a value modified by an INSN that references a biv inside a loop
3179 exit test (i.e., X is somehow related to the value of the biv). If X
3180 is a pseudo that is used more than once, then the biv is (effectively)
3181 used more than once. DATA is a pointer to a loop_regs structure. */
3183 static void
3184 note_set_pseudo_multiple_uses (x, y, data)
3185 rtx x;
3186 rtx y ATTRIBUTE_UNUSED;
3187 void *data;
3189 struct loop_regs *regs = (struct loop_regs *) data;
3191 if (x == 0)
3192 return;
3194 while (GET_CODE (x) == STRICT_LOW_PART
3195 || GET_CODE (x) == SIGN_EXTRACT
3196 || GET_CODE (x) == ZERO_EXTRACT
3197 || GET_CODE (x) == SUBREG)
3198 x = XEXP (x, 0);
3200 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3201 return;
3203 /* If we do not have usage information, or if we know the register
3204 is used more than once, note that fact for check_dbra_loop. */
3205 if (REGNO (x) >= max_reg_before_loop
3206 || ! regs->array[REGNO (x)].single_usage
3207 || regs->array[REGNO (x)].single_usage == const0_rtx)
3208 regs->multiple_uses = 1;
3211 /* Return nonzero if the rtx X is invariant over the current loop.
3213 The value is 2 if we refer to something only conditionally invariant.
3215 A memory ref is invariant if it is not volatile and does not conflict
3216 with anything stored in `loop_info->store_mems'. */
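/* The conditional case arises from regs whose set_in_loop count is
   negative: e.g. (plus:SI (reg 100) (const_int 4)) yields 2 rather
   than 1 when pseudo 100 is set inside the loop only by an insn
   that is itself a candidate for moving.  */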
3218 int
3219 loop_invariant_p (loop, x)
3220 const struct loop *loop;
3221 rtx x;
3223 struct loop_info *loop_info = LOOP_INFO (loop);
3224 struct loop_regs *regs = LOOP_REGS (loop);
3225 int i;
3226 enum rtx_code code;
3227 const char *fmt;
3228 int conditional = 0;
3229 rtx mem_list_entry;
3231 if (x == 0)
3232 return 1;
3233 code = GET_CODE (x);
3234 switch (code)
3236 case CONST_INT:
3237 case CONST_DOUBLE:
3238 case SYMBOL_REF:
3239 case CONST:
3240 return 1;
3242 case LABEL_REF:
3243 /* A LABEL_REF is normally invariant, however, if we are unrolling
3244 loops, and this label is inside the loop, then it isn't invariant.
3245 This is because each unrolled copy of the loop body will have
3246 a copy of this label. If this was invariant, then an insn loading
3247 the address of this label into a register might get moved outside
3248 the loop, and then each loop body would end up using the same label.
3250 We don't know the loop bounds here though, so just fail for all
3251 labels. */
3252 if (flag_unroll_loops)
3253 return 0;
3254 else
3255 return 1;
3257 case PC:
3258 case CC0:
3259 case UNSPEC_VOLATILE:
3260 return 0;
3262 case REG:
3263 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3264 since the reg might be set by initialization within the loop. */
3266 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3267 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3268 && ! current_function_has_nonlocal_goto)
3269 return 1;
3271 if (LOOP_INFO (loop)->has_call
3272 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3273 return 0;
3275 if (regs->array[REGNO (x)].set_in_loop < 0)
3276 return 2;
3278 return regs->array[REGNO (x)].set_in_loop == 0;
3280 case MEM:
3281 /* Volatile memory references must be rejected. Do this before
3282 checking for read-only items, so that volatile read-only items
3283 will be rejected also. */
3284 if (MEM_VOLATILE_P (x))
3285 return 0;
3287 /* See if there is any dependence between a store and this load. */
3288 mem_list_entry = loop_info->store_mems;
3289 while (mem_list_entry)
3291 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3292 x, rtx_varies_p))
3293 return 0;
3295 mem_list_entry = XEXP (mem_list_entry, 1);
3298 /* It's not invalidated by a store in memory
3299 but we must still verify the address is invariant. */
3300 break;
3302 case ASM_OPERANDS:
3303 /* Don't mess with insns declared volatile. */
3304 if (MEM_VOLATILE_P (x))
3305 return 0;
3306 break;
3308 default:
3309 break;
3312 fmt = GET_RTX_FORMAT (code);
3313 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3315 if (fmt[i] == 'e')
3317 int tem = loop_invariant_p (loop, XEXP (x, i));
3318 if (tem == 0)
3319 return 0;
3320 if (tem == 2)
3321 conditional = 1;
3323 else if (fmt[i] == 'E')
3325 int j;
3326 for (j = 0; j < XVECLEN (x, i); j++)
3328 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3329 if (tem == 0)
3330 return 0;
3331 if (tem == 2)
3332 conditional = 1;
3338 return 1 + conditional;
3341 /* Return nonzero if all the insns in the loop that set REG
3342 are INSN and the immediately following insns,
3343 and if each of those insns sets REG in an invariant way
3344 (not counting uses of REG in them).
3346 The value is 2 if some of these insns are only conditionally invariant.
3348 We assume that INSN itself is the first set of REG
3349 and that its source is invariant. */
3351 static int
3352 consec_sets_invariant_p (loop, reg, n_sets, insn)
3353 const struct loop *loop;
3354 int n_sets;
3355 rtx reg, insn;
3357 struct loop_regs *regs = LOOP_REGS (loop);
3358 rtx p = insn;
3359 unsigned int regno = REGNO (reg);
3360 rtx temp;
3361 /* Number of sets we have to insist on finding after INSN. */
3362 int count = n_sets - 1;
3363 int old = regs->array[regno].set_in_loop;
3364 int value = 0;
3365 int this;
3367 /* If N_SETS hit the limit, we can't rely on its value. */
3368 if (n_sets == 127)
3369 return 0;
3371 regs->array[regno].set_in_loop = 0;
3373 while (count > 0)
3375 enum rtx_code code;
3376 rtx set;
3378 p = NEXT_INSN (p);
3379 code = GET_CODE (p);
3381 /* If library call, skip to end of it. */
3382 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3383 p = XEXP (temp, 0);
3385 this = 0;
3386 if (code == INSN
3387 && (set = single_set (p))
3388 && GET_CODE (SET_DEST (set)) == REG
3389 && REGNO (SET_DEST (set)) == regno)
3391 this = loop_invariant_p (loop, SET_SRC (set));
3392 if (this != 0)
3393 value |= this;
3394 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3396 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3397 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3398 notes are OK. */
3399 this = (CONSTANT_P (XEXP (temp, 0))
3400 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3401 && loop_invariant_p (loop, XEXP (temp, 0))));
3402 if (this != 0)
3403 value |= this;
3406 if (this != 0)
3407 count--;
3408 else if (code != NOTE)
3410 regs->array[regno].set_in_loop = old;
3411 return 0;
3415 regs->array[regno].set_in_loop = old;
3416 /* If loop_invariant_p ever returned 2, we return 2. */
3417 return 1 + (value & 2);
3420 #if 0
3421 /* I don't think this condition is sufficient to allow INSN
3422 to be moved, so we no longer test it. */
3424 /* Return 1 if all insns in the basic block of INSN and following INSN
3425 that set REG are invariant according to TABLE. */
3427 static int
3428 all_sets_invariant_p (reg, insn, table)
3429 rtx reg, insn;
3430 short *table;
3432 rtx p = insn;
3433 int regno = REGNO (reg);
3435 while (1)
3437 enum rtx_code code;
3438 p = NEXT_INSN (p);
3439 code = GET_CODE (p);
3440 if (code == CODE_LABEL || code == JUMP_INSN)
3441 return 1;
3442 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3443 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3444 && REGNO (SET_DEST (PATTERN (p))) == regno)
3446 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3447 return 0;
3451 #endif /* 0 */
3453 /* Look at all uses (not sets) of registers in X. For each, if it is
3454 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3455 a different insn, set USAGE[REGNO] to const0_rtx. */
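/* USAGE[REGNO] thus passes through three states: 0 (no use seen
   yet), the using INSN (exactly one use so far), and const0_rtx
   (used by two or more insns).  */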
3457 static void
3458 find_single_use_in_loop (regs, insn, x)
3459 struct loop_regs *regs;
3460 rtx insn;
3461 rtx x;
3463 enum rtx_code code = GET_CODE (x);
3464 const char *fmt = GET_RTX_FORMAT (code);
3465 int i, j;
3467 if (code == REG)
3468 regs->array[REGNO (x)].single_usage
3469 = (regs->array[REGNO (x)].single_usage != 0
3470 && regs->array[REGNO (x)].single_usage != insn)
3471 ? const0_rtx : insn;
3473 else if (code == SET)
3475 /* Don't count SET_DEST if it is a REG; otherwise count things
3476 in SET_DEST because if a register is partially modified, it won't
3477 show up as a potential movable so we don't care how USAGE is set
3478 for it. */
3479 if (GET_CODE (SET_DEST (x)) != REG)
3480 find_single_use_in_loop (regs, insn, SET_DEST (x));
3481 find_single_use_in_loop (regs, insn, SET_SRC (x));
3483 else
3484 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3486 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3487 find_single_use_in_loop (regs, insn, XEXP (x, i));
3488 else if (fmt[i] == 'E')
3489 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3490 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3494 /* Count and record any set in X which is contained in INSN. Update
3495 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3496 in X. */
3498 static void
3499 count_one_set (regs, insn, x, last_set)
3500 struct loop_regs *regs;
3501 rtx insn, x;
3502 rtx *last_set;
3504 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3505 /* Don't move a reg that has an explicit clobber.
3506 It's not worth the pain to try to do it correctly. */
3507 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3509 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3511 rtx dest = SET_DEST (x);
3512 while (GET_CODE (dest) == SUBREG
3513 || GET_CODE (dest) == ZERO_EXTRACT
3514 || GET_CODE (dest) == SIGN_EXTRACT
3515 || GET_CODE (dest) == STRICT_LOW_PART)
3516 dest = XEXP (dest, 0);
3517 if (GET_CODE (dest) == REG)
3519 int i;
3520 int regno = REGNO (dest);
3521 for (i = 0; i < (int) LOOP_REGNO_NREGS (regno, dest); i++)
3523 /* If this is the first setting of this reg
3524 in current basic block, and it was set before,
3525 it must be set in two basic blocks, so it cannot
3526 be moved out of the loop. */
3527 if (regs->array[regno].set_in_loop > 0
3528 && last_set[regno] == 0)
3529 regs->array[regno+i].may_not_optimize = 1;
3530 /* If this is not first setting in current basic block,
3531 see if reg was used in between previous one and this.
3532 If so, neither one can be moved. */
3533 if (last_set[regno] != 0
3534 && reg_used_between_p (dest, last_set[regno], insn))
3535 regs->array[regno+i].may_not_optimize = 1;
3536 if (regs->array[regno+i].set_in_loop < 127)
3537 ++regs->array[regno+i].set_in_loop;
3538 last_set[regno+i] = insn;
3544 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3545 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3546 contained in insn INSN is used by any insn that precedes INSN in
3547 cyclic order starting from the loop entry point.
3549 We don't want to use INSN_LUID here because if we restrict INSN to those
3550 that have a valid INSN_LUID, it means we cannot move an invariant out
3551 from an inner loop past two loops. */
3553 static int
3554 loop_reg_used_before_p (loop, set, insn)
3555 const struct loop *loop;
3556 rtx set, insn;
3558 rtx reg = SET_DEST (set);
3559 rtx p;
3561 /* Scan forward checking for register usage. If we hit INSN, we
3562 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3563 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3565 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3566 return 1;
3568 if (p == loop->end)
3569 p = loop->start;
3572 return 0;
3576 /* Information we collect about arrays that we might want to prefetch. */
3577 struct prefetch_info
3579 struct iv_class *class; /* Class this prefetch is based on. */
3580 struct induction *giv; /* GIV this prefetch is based on. */
3581 rtx base_address; /* Start prefetching from this address plus
3582 index. */
3583 HOST_WIDE_INT index;
3584 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3585 iteration. */
3586 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3587 prefetch area in one iteration. */
3588 unsigned int total_bytes; /* Total bytes loop will access in this block.
3589 This is set only for loops with known
3590 iteration counts and is 0xffffffff
3591 otherwise. */
3592 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3593 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3594 unsigned int write : 1; /* 1 for read/write prefetches. */
3597 /* Data used by check_store function. */
3598 struct check_store_data
3600 rtx mem_address;
3601 int mem_write;
3604 static void check_store PARAMS ((rtx, rtx, void *));
3605 static void emit_prefetch_instructions PARAMS ((struct loop *));
3606 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3608 /* Set mem_write when mem_address is found. Used as callback to
3609 note_stores. */
3610 static void
3611 check_store (x, pat, data)
3612 rtx x, pat ATTRIBUTE_UNUSED;
3613 void *data;
3615 struct check_store_data *d = (struct check_store_data *) data;
3617 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3618 d->mem_write = 1;
3621 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3622 important to get some addresses combined. Later, more sophisticated
3623 transformations can be added when necessary.
3625 ??? The same trick of swapping operands is done in several other
3626 places; it would be nice to develop some common way to handle this. */
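/* For instance, (plus (reg 100) (reg 101)) and
   (plus (reg 101) (reg 100)) compare equal here, which lets the
   prefetcher combine addresses differing only in operand order.  */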
3628 static int
3629 rtx_equal_for_prefetch_p (x, y)
3630 rtx x, y;
3632 int i;
3633 int j;
3634 enum rtx_code code = GET_CODE (x);
3635 const char *fmt;
3637 if (x == y)
3638 return 1;
3639 if (code != GET_CODE (y))
3640 return 0;
3642 code = GET_CODE (x);
3644 if (GET_RTX_CLASS (code) == 'c')
3646 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3647 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3648 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3649 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3651 /* Compare the elements. If any pair of corresponding elements fails to
3652 match, return 0 for the whole thing. */
3654 fmt = GET_RTX_FORMAT (code);
3655 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3657 switch (fmt[i])
3659 case 'w':
3660 if (XWINT (x, i) != XWINT (y, i))
3661 return 0;
3662 break;
3664 case 'i':
3665 if (XINT (x, i) != XINT (y, i))
3666 return 0;
3667 break;
3669 case 'E':
3670 /* Two vectors must have the same length. */
3671 if (XVECLEN (x, i) != XVECLEN (y, i))
3672 return 0;
3674 /* And the corresponding elements must match. */
3675 for (j = 0; j < XVECLEN (x, i); j++)
3676 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3677 XVECEXP (y, i, j)) == 0)
3678 return 0;
3679 break;
3681 case 'e':
3682 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3683 return 0;
3684 break;
3686 case 's':
3687 if (strcmp (XSTR (x, i), XSTR (y, i)))
3688 return 0;
3689 break;
3691 case 'u':
3692 /* These are just backpointers, so they don't matter. */
3693 break;
3695 case '0':
3696 break;
3698 /* It is believed that rtx's at this level will never
3699 contain anything but integers and other rtx's,
3700 except for within LABEL_REFs and SYMBOL_REFs. */
3701 default:
3702 abort ();
3705 return 1;
3708 /* Remove constant addition value from the expression X (when present)
3709 and return it. */
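/* A sketch of the behavior (made-up rtl): given

       *x == (plus:SI (reg:SI 100) (const_int 16))

   the function returns 16 and leaves *x == (reg:SI 100); given a bare
   (const_int 24) it returns 24 and leaves *x == (const_int 0).  */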
3711 static HOST_WIDE_INT
3712 remove_constant_addition (x)
3713 rtx *x;
3715 HOST_WIDE_INT addval = 0;
3716 rtx exp = *x;
3718 /* Avoid clobbering a shared CONST expression. */
3719 if (GET_CODE (exp) == CONST)
3721 if (GET_CODE (XEXP (exp, 0)) == PLUS
3722 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3723 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3725 *x = XEXP (XEXP (exp, 0), 0);
3726 return INTVAL (XEXP (XEXP (exp, 0), 1));
3728 return 0;
3731 if (GET_CODE (exp) == CONST_INT)
3733 addval = INTVAL (exp);
3734 *x = const0_rtx;
3737 /* For a PLUS expression, recurse on ourselves. */
3738 else if (GET_CODE (exp) == PLUS)
3740 addval += remove_constant_addition (&XEXP (exp, 0));
3741 addval += remove_constant_addition (&XEXP (exp, 1));
3743 /* In case our parameter was constant, remove extra zero from the
3744 expression. */
3745 if (XEXP (exp, 0) == const0_rtx)
3746 *x = XEXP (exp, 1);
3747 else if (XEXP (exp, 1) == const0_rtx)
3748 *x = XEXP (exp, 0);
3751 return addval;
3754 /* Attempt to identify accesses to arrays that are most likely to cause cache
3755 misses, and emit prefetch instructions a few prefetch blocks forward.
3757 To detect the arrays we use the GIV information that was collected by the
3758 strength reduction pass.
3760 The prefetch instructions are generated after the GIV information is done
3761 and before the strength reduction process. The new GIVs are injected into
3762 the strength reduction tables, so the prefetch addresses are optimized as
3763 well.
3765 GIVs are split into base address, stride, and constant addition values.
3766 GIVs with the same address, stride and close addition values are combined
3767 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3768 for write instructions can be used for the block we write to, on machines
3769 that support write prefetches.
3771 Several heuristics are used to determine when to prefetch. They are
3772 controlled by defined symbols that can be overridden for each target. */
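/* For instance (an illustrative case, not from any particular target):
   a loop reading a[i] and a[i+2], with 4-byte elements and a biv `i'
   stepped by 1, produces two DEST_ADDR givs with equal base address
   and stride 4 whose indexes differ by only 8 bytes; the code below
   merges them into a single prefetch stream.  */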
3774 static void
3775 emit_prefetch_instructions (loop)
3776 struct loop *loop;
3778 int num_prefetches = 0;
3779 int num_real_prefetches = 0;
3780 int num_real_write_prefetches = 0;
3781 int num_prefetches_before = 0;
3782 int num_write_prefetches_before = 0;
3783 int ahead = 0;
3784 int i;
3785 struct iv_class *bl;
3786 struct induction *iv;
3787 struct prefetch_info info[MAX_PREFETCHES];
3788 struct loop_ivs *ivs = LOOP_IVS (loop);
3790 if (!HAVE_prefetch)
3791 return;
3793 /* Consider only loops w/o calls. When a call is done, the loop is probably
3794 slow enough to read the memory. */
3795 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3797 if (loop_dump_stream)
3798 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3800 return;
3803 /* Don't prefetch in loops known to have few iterations. */
3804 if (PREFETCH_NO_LOW_LOOPCNT
3805 && LOOP_INFO (loop)->n_iterations
3806 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3808 if (loop_dump_stream)
3809 fprintf (loop_dump_stream,
3810 "Prefetch: ignoring loop: not enough iterations.\n");
3811 return;
3814 /* Search all induction variables and pick those interesting for the prefetch
3815 machinery. */
3816 for (bl = ivs->list; bl; bl = bl->next)
3818 struct induction *biv = bl->biv, *biv1;
3819 int basestride = 0;
3821 biv1 = biv;
3823 /* Expect all BIVs to be executed in each iteration. This makes our
3824 analysis more conservative. */
3825 while (biv1)
3827 /* Discard non-constant additions that we can't handle well yet, and
3828 BIVs that are executed multiple times; such BIVs ought to be
3829 handled in the nested loop. We accept not_every_iteration BIVs,
3830 since these only result in larger strides and make our
3831 heuristics more conservative. */
3832 if (GET_CODE (biv1->add_val) != CONST_INT)
3834 if (loop_dump_stream)
3836 fprintf (loop_dump_stream,
3837 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3838 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
3839 print_rtl (loop_dump_stream, biv1->add_val);
3840 fprintf (loop_dump_stream, "\n");
3842 break;
3845 if (biv1->maybe_multiple)
3847 if (loop_dump_stream)
3849 fprintf (loop_dump_stream,
3850 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3851 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
3852 print_rtl (loop_dump_stream, biv1->add_val);
3853 fprintf (loop_dump_stream, "\n");
3855 break;
3858 basestride += INTVAL (biv1->add_val);
3859 biv1 = biv1->next_iv;
3862 if (biv1 || !basestride)
3863 continue;
3865 for (iv = bl->giv; iv; iv = iv->next_iv)
3867 rtx address;
3868 rtx temp;
3869 HOST_WIDE_INT index = 0;
3870 int add = 1;
3871 HOST_WIDE_INT stride = 0;
3872 int stride_sign = 1;
3873 struct check_store_data d;
3874 const char *ignore_reason = NULL;
3875 int size = GET_MODE_SIZE (GET_MODE (iv));
3877 /* See whether an induction variable is interesting to us and if
3878 not, report the reason. */
3879 if (iv->giv_type != DEST_ADDR)
3880 ignore_reason = "giv is not a destination address";
3882 /* We are interested only in constant stride memory references
3883 in order to be able to compute density easily. */
3884 else if (GET_CODE (iv->mult_val) != CONST_INT)
3885 ignore_reason = "stride is not constant";
3887 else
3889 stride = INTVAL (iv->mult_val) * basestride;
3890 if (stride < 0)
3892 stride = -stride;
3893 stride_sign = -1;
3896 /* On some targets, reversed order prefetches are not
3897 worthwhile. */
3898 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3899 ignore_reason = "reversed order stride";
3901 /* Prefetch of accesses with an extreme stride might not be
3902 worthwhile, either. */
3903 else if (PREFETCH_NO_EXTREME_STRIDE
3904 && stride > PREFETCH_EXTREME_STRIDE)
3905 ignore_reason = "extreme stride";
3907 /* Ignore GIVs with varying add values; we can't predict the
3908 value for the next iteration. */
3909 else if (!loop_invariant_p (loop, iv->add_val))
3910 ignore_reason = "giv has varying add value";
3912 /* Ignore GIVs in the nested loops; they ought to have been
3913 handled already. */
3914 else if (iv->maybe_multiple)
3915 ignore_reason = "giv is in nested loop";
3918 if (ignore_reason != NULL)
3920 if (loop_dump_stream)
3921 fprintf (loop_dump_stream,
3922 "Prefetch: ignoring giv at %d: %s.\n",
3923 INSN_UID (iv->insn), ignore_reason);
3924 continue;
3927 /* Determine the pointer to the basic array we are examining. It is
3928 the sum of the BIV's initial value and the GIV's add_val. */
3929 address = copy_rtx (iv->add_val);
3930 temp = copy_rtx (bl->initial_value);
3932 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3933 index = remove_constant_addition (&address);
3935 d.mem_write = 0;
3936 d.mem_address = *iv->location;
3938 /* When the GIV is not always executed, we might be better off by
3939 not dirtying the cache pages. */
3940 if (PREFETCH_CONDITIONAL || iv->always_executed)
3941 note_stores (PATTERN (iv->insn), check_store, &d);
3942 else
3944 if (loop_dump_stream)
3945 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3946 INSN_UID (iv->insn), "in conditional code.");
3947 continue;
3950 /* Attempt to find another prefetch to the same array and see if we
3951 can merge this one. */
3952 for (i = 0; i < num_prefetches; i++)
3953 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3954 && stride == info[i].stride)
3956 /* In case both access the same array (same location,
3957 just with a small difference in constant indexes), merge
3958 the prefetches. Just do the later one and the earlier one will
3959 get prefetched from the previous iteration.
3960 The artificial threshold should not be too small,
3961 but also not bigger than the small portion of memory usually
3962 traversed by a single loop. */
3963 if (index >= info[i].index
3964 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3966 info[i].write |= d.mem_write;
3967 info[i].bytes_accessed += size;
3968 info[i].index = index;
3969 info[i].giv = iv;
3970 info[i].class = bl;
3971 info[i].base_address = address;
3972 add = 0;
3973 break;
3976 if (index < info[i].index
3977 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3979 info[i].write |= d.mem_write;
3980 info[i].bytes_accessed += size;
3981 add = 0;
3982 break;
3986 /* Merging failed. */
3987 if (add)
3989 info[num_prefetches].giv = iv;
3990 info[num_prefetches].class = bl;
3991 info[num_prefetches].index = index;
3992 info[num_prefetches].stride = stride;
3993 info[num_prefetches].base_address = address;
3994 info[num_prefetches].write = d.mem_write;
3995 info[num_prefetches].bytes_accessed = size;
3996 num_prefetches++;
3997 if (num_prefetches >= MAX_PREFETCHES)
3999 if (loop_dump_stream)
4000 fprintf (loop_dump_stream,
4001 "Maximal number of prefetches exceeded.\n");
4002 return;
4008 for (i = 0; i < num_prefetches; i++)
4010 int density;
4012 /* Attempt to calculate the total number of bytes fetched by all
4013 iterations of the loop. Avoid overflow. */
4014 if (LOOP_INFO (loop)->n_iterations
4015 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4016 >= LOOP_INFO (loop)->n_iterations))
4017 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4018 else
4019 info[i].total_bytes = 0xffffffff;
4021 density = info[i].bytes_accessed * 100 / info[i].stride;
4023 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4024 if (PREFETCH_ONLY_DENSE_MEM)
4025 if (density * 256 > PREFETCH_DENSE_MEM * 100
4026 && (info[i].total_bytes / PREFETCH_BLOCK
4027 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4029 info[i].prefetch_before_loop = 1;
4030 info[i].prefetch_in_loop
4031 = (info[i].total_bytes / PREFETCH_BLOCK
4032 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4034 else
4036 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4037 if (loop_dump_stream)
4038 fprintf (loop_dump_stream,
4039 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4040 INSN_UID (info[i].giv->insn), density);
4042 else
4043 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
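/* Worked example with made-up numbers: accessing 4 bytes per iteration
   at a stride of 16 gives density == 4 * 100 / 16 == 25 (percent), and
   the test above then compares 25 * 256 with PREFETCH_DENSE_MEM * 100.  */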
4045 /* Find how many prefetch instructions we'll use within the loop. */
4046 if (info[i].prefetch_in_loop != 0)
4048 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4049 / PREFETCH_BLOCK);
4050 num_real_prefetches += info[i].prefetch_in_loop;
4051 if (info[i].write)
4052 num_real_write_prefetches += info[i].prefetch_in_loop;
4056 /* Determine how many iterations ahead to prefetch within the loop, based
4057 on how many prefetches we currently expect to do within the loop. */
4058 if (num_real_prefetches != 0)
4060 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4062 if (loop_dump_stream)
4063 fprintf (loop_dump_stream,
4064 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4065 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4066 num_real_prefetches = 0, num_real_write_prefetches = 0;
4069 /* We'll also use AHEAD to determine how many prefetch instructions to
4070 emit before a loop, so don't leave it zero. */
4071 if (ahead == 0)
4072 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
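/* E.g. if SIMULTANEOUS_PREFETCHES is 3 and num_real_prefetches is 4,
   AHEAD computes to 0, so prefetching within the loop is abandoned
   and AHEAD falls back to PREFETCH_BLOCKS_BEFORE_LOOP_MAX for the
   before-loop prefetches emitted further down.  */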
4074 for (i = 0; i < num_prefetches; i++)
4076 /* Update if we've decided not to prefetch anything within the loop. */
4077 if (num_real_prefetches == 0)
4078 info[i].prefetch_in_loop = 0;
4080 /* Find how many prefetch instructions we'll use before the loop. */
4081 if (info[i].prefetch_before_loop != 0)
4083 int n = info[i].total_bytes / PREFETCH_BLOCK;
4084 if (n > ahead)
4085 n = ahead;
4086 info[i].prefetch_before_loop = n;
4087 num_prefetches_before += n;
4088 if (info[i].write)
4089 num_write_prefetches_before += n;
4092 if (loop_dump_stream)
4094 if (info[i].prefetch_in_loop == 0
4095 && info[i].prefetch_before_loop == 0)
4096 continue;
4097 fprintf (loop_dump_stream, "Prefetch insn: %d",
4098 INSN_UID (info[i].giv->insn));
4099 fprintf (loop_dump_stream,
4100 "; in loop: %d; before: %d; %s\n",
4101 info[i].prefetch_in_loop,
4102 info[i].prefetch_before_loop,
4103 info[i].write ? "read/write" : "read only");
4104 fprintf (loop_dump_stream,
4105 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4106 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4107 info[i].bytes_accessed, info[i].total_bytes);
4108 fprintf (loop_dump_stream, " index: ");
4109 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4110 fprintf (loop_dump_stream, "; stride: ");
4111 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4112 fprintf (loop_dump_stream, "; address: ");
4113 print_rtl (loop_dump_stream, info[i].base_address);
4114 fprintf (loop_dump_stream, "\n");
4118 if (num_real_prefetches + num_prefetches_before > 0)
4120 /* Record that this loop uses prefetch instructions. */
4121 LOOP_INFO (loop)->has_prefetch = 1;
4123 if (loop_dump_stream)
4125 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4126 num_real_prefetches, num_real_write_prefetches);
4127 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4128 num_prefetches_before, num_write_prefetches_before);
4132 for (i = 0; i < num_prefetches; i++)
4134 int y;
4136 for (y = 0; y < info[i].prefetch_in_loop; y++)
4138 rtx loc = copy_rtx (*info[i].giv->location);
4139 rtx insn;
4140 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4141 rtx before_insn = info[i].giv->insn;
4142 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4143 rtx seq;
4145 /* We can save some effort by offsetting the address on
4146 architectures with offsettable memory references. */
4147 if (offsettable_address_p (0, VOIDmode, loc))
4148 loc = plus_constant (loc, bytes_ahead);
4149 else
4151 rtx reg = gen_reg_rtx (Pmode);
4152 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4153 GEN_INT (bytes_ahead), reg,
4154 0, before_insn);
4155 loc = reg;
4158 start_sequence ();
4159 /* Make sure the address operand is valid for prefetch. */
4160 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4161 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4162 loc = force_reg (Pmode, loc);
4163 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4164 GEN_INT (3)));
4165 seq = get_insns ();
4166 end_sequence ();
4167 emit_insn_before (seq, before_insn);
4169 /* Check all insns emitted and record the new GIV
4170 information. */
4171 insn = NEXT_INSN (prev_insn);
4172 while (insn != before_insn)
4174 insn = check_insn_for_givs (loop, insn,
4175 info[i].giv->always_executed,
4176 info[i].giv->maybe_multiple);
4177 insn = NEXT_INSN (insn);
4181 if (PREFETCH_BEFORE_LOOP)
4183 /* Emit insns before the loop to fetch the first cache lines or,
4184 if we're not prefetching within the loop, everything we expect
4185 to need. */
4186 for (y = 0; y < info[i].prefetch_before_loop; y++)
4188 rtx reg = gen_reg_rtx (Pmode);
4189 rtx loop_start = loop->start;
4190 rtx init_val = info[i].class->initial_value;
4191 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4192 info[i].giv->add_val,
4193 GEN_INT (y * PREFETCH_BLOCK));
4195 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4196 non-constant INIT_VAL to have the same mode as REG, which
4197 in this case we know to be Pmode. */
4198 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4199 init_val = convert_to_mode (Pmode, init_val, 0);
4200 loop_iv_add_mult_emit_before (loop, init_val,
4201 info[i].giv->mult_val,
4202 add_val, reg, 0, loop_start);
4203 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4204 GEN_INT (3)),
4205 loop_start);
4210 return;
4213 /* A "basic induction variable" or biv is a pseudo reg that is set
4214 (within this loop) only by incrementing or decrementing it. */
4215 /* A "general induction variable" or giv is a pseudo reg whose
4216 value is a linear function of a biv. */
4218 /* Bivs are recognized by `basic_induction_var';
4219 Givs by `general_induction_var'. */
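/* An illustrative example (C source, not from this file):

       for (i = 0; i < n; i++)
         a[i] = b[i];

   Here `i' is a biv, set only by adding the constant 1, while the
   addresses `a + 4*i' and `b + 4*i' (assuming 4-byte elements) are
   givs: linear functions mult_val * biv + add_val, with mult_val 4
   and add_val `a' resp. `b'.  */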
4221 /* Communication with routines called via `note_stores'. */
4223 static rtx note_insn;
4225 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4227 static rtx addr_placeholder;
4229 /* ??? Unfinished optimizations, and possible future optimizations,
4230 for the strength reduction code. */
4232 /* ??? The interaction of biv elimination, and recognition of 'constant'
4233 bivs, may cause problems. */
4235 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4236 performance problems.
4238 Perhaps don't eliminate things that can be combined with an addressing
4239 mode. Find all givs that have the same biv, mult_val, and add_val;
4240 then for each giv, check to see if its only use dies in a following
4241 memory address. If so, generate a new memory address and check to see
4242 if it is valid. If it is valid, then store the modified memory address,
4243 otherwise, mark the giv as not done so that it will get its own iv. */
4245 /* ??? Could try to optimize branches when it is known that a biv is always
4246 positive. */
4248 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
4249 giv so that an optimized branch can still be recognized by the combiner,
4250 e.g. the VAX acb insn. */
4252 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4253 was rerun in loop_optimize whenever a register was added or moved.
4254 Also, some of the optimizations could be a little less conservative. */
4256 /* Scan the loop body and call FNCALL for each insn. In addition to the
4257 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4258 callback.
4260 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4261 least once for every loop iteration except for the last one.
4263 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4264 loop iteration. */
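/* To illustrate the difference between the two flags (a sketch only):
   for a body containing `if (c) x++;', the increment insn is
   NOT_EVERY_ITERATION but not MAYBE_MULTIPLE, whereas an insn inside
   a nested inner loop is MAYBE_MULTIPLE, since it can execute several
   times per iteration of this loop.  */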
4266 void
4267 for_each_insn_in_loop (loop, fncall)
4268 struct loop *loop;
4269 loop_insn_callback fncall;
4271 int not_every_iteration = 0;
4272 int maybe_multiple = 0;
4273 int past_loop_latch = 0;
4274 int loop_depth = 0;
4275 rtx p;
4277 /* If loop_scan_start points to the loop exit test, we have to be wary of
4278 subversive use of gotos inside expression statements. */
4279 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4280 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4282 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4283 for (p = next_insn_in_loop (loop, loop->scan_start);
4284 p != NULL_RTX;
4285 p = next_insn_in_loop (loop, p))
4287 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4289 /* Past CODE_LABEL, we get to insns that may be executed multiple
4290 times. The only way we can be sure that they can't is if every
4291 jump insn between here and the end of the loop either
4292 returns, exits the loop, is a jump to a location that is still
4293 behind the label, or is a jump to the loop start. */
4295 if (GET_CODE (p) == CODE_LABEL)
4297 rtx insn = p;
4299 maybe_multiple = 0;
4301 while (1)
4303 insn = NEXT_INSN (insn);
4304 if (insn == loop->scan_start)
4305 break;
4306 if (insn == loop->end)
4308 if (loop->top != 0)
4309 insn = loop->top;
4310 else
4311 break;
4312 if (insn == loop->scan_start)
4313 break;
4316 if (GET_CODE (insn) == JUMP_INSN
4317 && GET_CODE (PATTERN (insn)) != RETURN
4318 && (!any_condjump_p (insn)
4319 || (JUMP_LABEL (insn) != 0
4320 && JUMP_LABEL (insn) != loop->scan_start
4321 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4323 maybe_multiple = 1;
4324 break;
4329 /* Past a jump, we get to insns for which we can't count
4330 on whether they will be executed during each iteration. */
4331 /* This code appears twice in strength_reduce. There is also similar
4332 code in scan_loop. */
4333 if (GET_CODE (p) == JUMP_INSN
4334 /* If we enter the loop in the middle, and scan around to the
4335 beginning, don't set not_every_iteration for that.
4336 This can be any kind of jump, since we want to know if insns
4337 will be executed if the loop is executed. */
4338 && !(JUMP_LABEL (p) == loop->top
4339 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4340 && any_uncondjump_p (p))
4341 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4343 rtx label = 0;
4345 /* If this is a jump outside the loop, then it also doesn't
4346 matter. Check to see if the target of this branch is on the
4347 loop->exit_labels list. */
4349 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4350 if (XEXP (label, 0) == JUMP_LABEL (p))
4351 break;
4353 if (!label)
4354 not_every_iteration = 1;
4357 else if (GET_CODE (p) == NOTE)
4359 /* At the virtual top of a converted loop, insns are again known to
4360 be executed each iteration: logically, the loop begins here
4361 even though the exit code has been duplicated.
4363 Insns are also again known to be executed each iteration at
4364 the LOOP_CONT note. */
4365 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4366 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4367 && loop_depth == 0)
4368 not_every_iteration = 0;
4369 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4370 loop_depth++;
4371 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4372 loop_depth--;
4375 /* Note if we pass a loop latch. If we do, then we can not clear
4376 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4377 a loop since a jump before the last CODE_LABEL may have started
4378 a new loop iteration.
4380 Note that LOOP_TOP is only set for rotated loops and we need
4381 this check for all loops, so compare against the CODE_LABEL
4382 which immediately follows LOOP_START. */
4383 if (GET_CODE (p) == JUMP_INSN
4384 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4385 past_loop_latch = 1;
4387 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4388 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4389 or not an insn is known to be executed each iteration of the
4390 loop, whether or not any iterations are known to occur.
4392 Therefore, if we have just passed a label and have no more labels
4393 between here and the test insn of the loop, and we have not passed
4394 a jump to the top of the loop, then we know these insns will be
4395 executed each iteration. */
4397 if (not_every_iteration
4398 && !past_loop_latch
4399 && GET_CODE (p) == CODE_LABEL
4400 && no_labels_between_p (p, loop->end)
4401 && loop_insn_first_p (p, loop->cont))
4402 not_every_iteration = 0;
4406 static void
4407 loop_bivs_find (loop)
4408 struct loop *loop;
4410 struct loop_regs *regs = LOOP_REGS (loop);
4411 struct loop_ivs *ivs = LOOP_IVS (loop);
4412 /* Temporary list pointers for traversing ivs->list. */
4413 struct iv_class *bl, **backbl;
4415 ivs->list = 0;
4417 for_each_insn_in_loop (loop, check_insn_for_bivs);
4419 /* Scan ivs->list to remove all regs that proved not to be bivs.
4420 Make a sanity check against regs->n_times_set. */
4421 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4423 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4424 /* Above happens if register modified by subreg, etc. */
4425 /* Make sure it is not recognized as a basic induction var: */
4426 || regs->array[bl->regno].n_times_set != bl->biv_count
4427 /* If never incremented, it is invariant that we decided not to
4428 move. So leave it alone. */
4429 || ! bl->incremented)
4431 if (loop_dump_stream)
4432 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4433 bl->regno,
4434 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4435 ? "not induction variable"
4436 : (! bl->incremented ? "never incremented"
4437 : "count error")));
4439 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4440 *backbl = bl->next;
4442 else
4444 backbl = &bl->next;
4446 if (loop_dump_stream)
4447 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4453 /* Determine how BIVs are initialized by looking through the pre-header
4454 extended basic block. */
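/* E.g. an `i = 0' in the pre-header gives the biv `i' an init_insn
   and an init_set of (set (reg i) (const_int 0)); as a sketch of the
   second case below, an NE test branching around the loop supplies
   an initial value the same way.  */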
4455 static void
4456 loop_bivs_init_find (loop)
4457 struct loop *loop;
4459 struct loop_ivs *ivs = LOOP_IVS (loop);
4460 /* Temporary list pointers for traversing ivs->list. */
4461 struct iv_class *bl;
4462 int call_seen;
4463 rtx p;
4465 /* Find initial value for each biv by searching backwards from loop_start,
4466 halting at first label. Also record any test condition. */
4468 call_seen = 0;
4469 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4471 rtx test;
4473 note_insn = p;
4475 if (GET_CODE (p) == CALL_INSN)
4476 call_seen = 1;
4478 if (INSN_P (p))
4479 note_stores (PATTERN (p), record_initial, ivs);
4481 /* Record any test of a biv that branches around the loop if no store
4482 between it and the start of loop. We only care about tests with
4483 constants and registers and only certain of those. */
4484 if (GET_CODE (p) == JUMP_INSN
4485 && JUMP_LABEL (p) != 0
4486 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4487 && (test = get_condition_for_loop (loop, p)) != 0
4488 && GET_CODE (XEXP (test, 0)) == REG
4489 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4490 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4491 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4492 && bl->init_insn == 0)
4494 /* If an NE test, we have an initial value! */
4495 if (GET_CODE (test) == NE)
4497 bl->init_insn = p;
4498 bl->init_set = gen_rtx_SET (VOIDmode,
4499 XEXP (test, 0), XEXP (test, 1));
4501 else
4502 bl->initial_test = test;
4508 /* Look at each biv and see if we can say anything better about its
4509 initial value from any initializing insns set up above. (This is done
4510 in two passes to avoid missing SETs in a PARALLEL.) */
4511 static void
4512 loop_bivs_check (loop)
4513 struct loop *loop;
4515 struct loop_ivs *ivs = LOOP_IVS (loop);
4516 /* Temporary list pointers for traversing ivs->list. */
4517 struct iv_class *bl;
4518 struct iv_class **backbl;
4520 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4522 rtx src;
4523 rtx note;
4525 if (! bl->init_insn)
4526 continue;
4528 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4529 is a constant, use the value of that. */
4530 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4531 && CONSTANT_P (XEXP (note, 0)))
4532 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4533 && CONSTANT_P (XEXP (note, 0))))
4534 src = XEXP (note, 0);
4535 else
4536 src = SET_SRC (bl->init_set);
4538 if (loop_dump_stream)
4539 fprintf (loop_dump_stream,
4540 "Biv %d: initialized at insn %d: initial value ",
4541 bl->regno, INSN_UID (bl->init_insn));
4543 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4544 || GET_MODE (src) == VOIDmode)
4545 && valid_initial_value_p (src, bl->init_insn,
4546 LOOP_INFO (loop)->pre_header_has_call,
4547 loop->start))
4549 bl->initial_value = src;
4551 if (loop_dump_stream)
4553 print_simple_rtl (loop_dump_stream, src);
4554 fputc ('\n', loop_dump_stream);
4557 /* If we can't make it a giv,
4558 let biv keep initial value of "itself". */
4559 else if (loop_dump_stream)
4560 fprintf (loop_dump_stream, "is complex\n");
4565 /* Search the loop for general induction variables. */
4567 static void
4568 loop_givs_find (loop)
4569 struct loop* loop;
4571 for_each_insn_in_loop (loop, check_insn_for_givs);
4575 /* For each giv for which we still don't know whether or not it is
4576 replaceable, check to see if it is replaceable because its final value
4577 can be calculated. */
4579 static void
4580 loop_givs_check (loop)
4581 struct loop *loop;
4583 struct loop_ivs *ivs = LOOP_IVS (loop);
4584 struct iv_class *bl;
4586 for (bl = ivs->list; bl; bl = bl->next)
4588 struct induction *v;
4590 for (v = bl->giv; v; v = v->next_iv)
4591 if (! v->replaceable && ! v->not_replaceable)
4592 check_final_value (loop, v);
4597 /* Return non-zero if it is possible to eliminate the biv BL provided
4598 all givs are reduced. This is possible if either the reg is not
4599 used outside the loop, or we can compute what its final value will
4600 be. */
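/* E.g. (illustrative only) a counter in `for (i = 0; i < 10; i++)'
   that is unused after the loop, or whose final value (here 10) can
   be computed by final_biv_value, is a candidate; the actual
   elimination test is done by maybe_eliminate_biv below.  */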
4602 static int
4603 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4604 struct loop *loop;
4605 struct iv_class *bl;
4606 int threshold;
4607 int insn_count;
4609 /* For architectures with a decrement_and_branch_until_zero insn,
4610 don't do this if we put a REG_NONNEG note on the endtest for this
4611 biv. */
4613 #ifdef HAVE_decrement_and_branch_until_zero
4614 if (bl->nonneg)
4616 if (loop_dump_stream)
4617 fprintf (loop_dump_stream,
4618 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4619 return 0;
4621 #endif
4623 /* Check that biv is used outside loop or if it has a final value.
4624 Compare against bl->init_insn rather than loop->start. We aren't
4625 concerned with any uses of the biv between init_insn and
4626 loop->start since these won't be affected by the value of the biv
4627 elsewhere in the function, so long as init_insn doesn't use the
4628 biv itself. */
4630 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4631 && bl->init_insn
4632 && INSN_UID (bl->init_insn) < max_uid_for_loop
4633 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4634 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4635 || (bl->final_value = final_biv_value (loop, bl)))
4636 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4638 if (loop_dump_stream)
4640 fprintf (loop_dump_stream,
4641 "Cannot eliminate biv %d.\n",
4642 bl->regno);
4643 fprintf (loop_dump_stream,
4644 "First use: insn %d, last use: insn %d.\n",
4645 REGNO_FIRST_UID (bl->regno),
4646 REGNO_LAST_UID (bl->regno));
4648 return 0;
4652 /* Reduce each giv of BL that we have decided to reduce. */
4654 static void
4655 loop_givs_reduce (loop, bl)
4656 struct loop *loop;
4657 struct iv_class *bl;
4659 struct induction *v;
4661 for (v = bl->giv; v; v = v->next_iv)
4663 struct induction *tv;
4664 if (! v->ignore && v->same == 0)
4666 int auto_inc_opt = 0;
4668 /* If the code for derived givs immediately below has already
4669 allocated a new_reg, we must keep it. */
4670 if (! v->new_reg)
4671 v->new_reg = gen_reg_rtx (v->mode);
4673 #ifdef AUTO_INC_DEC
4674 /* If the target has auto-increment addressing modes, and
4675 this is an address giv, then try to put the increment
4676 immediately after its use, so that flow can create an
4677 auto-increment addressing mode. */
4678 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4679 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4680 /* We don't handle reversed biv's because bl->biv->insn
4681 does not have a valid INSN_LUID. */
4682 && ! bl->reversed
4683 && v->always_executed && ! v->maybe_multiple
4684 && INSN_UID (v->insn) < max_uid_for_loop)
4686 /* If other giv's have been combined with this one, then
4687 this will work only if all uses of the other giv's occur
4688 before this giv's insn. This is difficult to check.
4690 We simplify this by looking for the common case where
4691 there is one DEST_REG giv, and this giv's insn is the
4692 last use of the dest_reg of that DEST_REG giv. If the
4693 increment occurs after the address giv, then we can
4694 perform the optimization. (Otherwise, the increment
4695 would have to go before other_giv, and we would not be
4696 able to combine it with the address giv to get an
4697 auto-inc address.) */
4698 if (v->combined_with)
4700 struct induction *other_giv = 0;
4702 for (tv = bl->giv; tv; tv = tv->next_iv)
4703 if (tv->same == v)
4705 if (other_giv)
4706 break;
4707 else
4708 other_giv = tv;
4710 if (! tv && other_giv
4711 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4712 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4713 == INSN_UID (v->insn))
4714 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4715 auto_inc_opt = 1;
4717 /* Check for case where increment is before the address
4718 giv. Do this test in "loop order". */
4719 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4720 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4721 || (INSN_LUID (bl->biv->insn)
4722 > INSN_LUID (loop->scan_start))))
4723 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4724 && (INSN_LUID (loop->scan_start)
4725 < INSN_LUID (bl->biv->insn))))
4726 auto_inc_opt = -1;
4727 else
4728 auto_inc_opt = 1;
4730 #ifdef HAVE_cc0
4732 rtx prev;
4734 /* We can't put an insn immediately after one setting
4735 cc0, or immediately before one using cc0. */
4736 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4737 || (auto_inc_opt == -1
4738 && (prev = prev_nonnote_insn (v->insn)) != 0
4739 && INSN_P (prev)
4740 && sets_cc0_p (PATTERN (prev))))
4741 auto_inc_opt = 0;
4743 #endif
4745 if (auto_inc_opt)
4746 v->auto_inc_opt = 1;
4748 #endif
4750 /* For each place where the biv is incremented, add an insn
4751 to increment the new, reduced reg for the giv. */
4752 for (tv = bl->biv; tv; tv = tv->next_iv)
4754 rtx insert_before;
4756 if (! auto_inc_opt)
4757 insert_before = NEXT_INSN (tv->insn);
4758 else if (auto_inc_opt == 1)
4759 insert_before = NEXT_INSN (v->insn);
4760 else
4761 insert_before = v->insn;
4763 if (tv->mult_val == const1_rtx)
4764 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4765 v->new_reg, v->new_reg,
4766 0, insert_before);
4767 else /* tv->mult_val == const0_rtx */
4768 /* A multiply is acceptable here
4769 since this is presumed to be seldom executed. */
4770 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4771 v->add_val, v->new_reg,
4772 0, insert_before);
4775 /* Add code at loop start to initialize giv's reduced reg. */
4777 loop_iv_add_mult_hoist (loop,
4778 extend_value_for_giv (v, bl->initial_value),
4779 v->mult_val, v->add_val, v->new_reg);
4785 /* Check for givs whose first use is their definition and whose
4786 last use is the definition of another giv. If so, it is likely
4787 dead and should not be used to derive another giv nor to
4788 eliminate a biv. */
4790 static void
4791 loop_givs_dead_check (loop, bl)
4792 struct loop *loop ATTRIBUTE_UNUSED;
4793 struct iv_class *bl;
4795 struct induction *v;
4797 for (v = bl->giv; v; v = v->next_iv)
4799 if (v->ignore
4800 || (v->same && v->same->ignore))
4801 continue;
4803 if (v->giv_type == DEST_REG
4804 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4806 struct induction *v1;
4808 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4809 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4810 v->maybe_dead = 1;
4816 static void
4817 loop_givs_rescan (loop, bl, reg_map)
4818 struct loop *loop;
4819 struct iv_class *bl;
4820 rtx *reg_map;
4822 struct induction *v;
4824 for (v = bl->giv; v; v = v->next_iv)
4826 if (v->same && v->same->ignore)
4827 v->ignore = 1;
4829 if (v->ignore)
4830 continue;
4832 /* Update expression if this was combined, in case other giv was
4833 replaced. */
4834 if (v->same)
4835 v->new_reg = replace_rtx (v->new_reg,
4836 v->same->dest_reg, v->same->new_reg);
4838 /* See if this register is known to be a pointer to something. If
4839 so, see if we can find the alignment. First see if there is a
4840 destination register that is a pointer. If so, this shares the
4841 alignment too. Next see if we can deduce anything from the
4842 computational information. If not, and this is a DEST_ADDR
4843 giv, at least we know that it's a pointer, though we don't know
4844 the alignment. */
4845 if (GET_CODE (v->new_reg) == REG
4846 && v->giv_type == DEST_REG
4847 && REG_POINTER (v->dest_reg))
4848 mark_reg_pointer (v->new_reg,
4849 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4850 else if (GET_CODE (v->new_reg) == REG
4851 && REG_POINTER (v->src_reg))
4853 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4855 if (align == 0
4856 || GET_CODE (v->add_val) != CONST_INT
4857 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4858 align = 0;
4860 mark_reg_pointer (v->new_reg, align);
4862 else if (GET_CODE (v->new_reg) == REG
4863 && GET_CODE (v->add_val) == REG
4864 && REG_POINTER (v->add_val))
4866 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4868 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4869 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4870 align = 0;
4872 mark_reg_pointer (v->new_reg, align);
4874 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4875 mark_reg_pointer (v->new_reg, 0);
4877 if (v->giv_type == DEST_ADDR)
4878 /* Store reduced reg as the address in the memref where we found
4879 this giv. */
4880 validate_change (v->insn, v->location, v->new_reg, 0);
4881 else if (v->replaceable)
4883 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4885 else
4887 rtx original_insn = v->insn;
4888 rtx note;
4890 /* Not replaceable; emit an insn to set the original giv reg from
4891 the reduced giv, same as above. */
4892 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4893 gen_move_insn (v->dest_reg,
4894 v->new_reg));
4896 /* The original insn may have a REG_EQUAL note. This note is
4897 now incorrect and may result in invalid substitutions later.
4898 The original insn is dead, but may be part of a libcall
4899 sequence, which doesn't seem worth the bother of handling. */
4900 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4901 if (note)
4902 remove_note (original_insn, note);
4905 /* When a loop is reversed, givs which depend on the reversed
4906 biv, and which are live outside the loop, must be set to their
4907 correct final value. This insn is only needed if the giv is
4908 not replaceable. The correct final value is the same as the
4909 value that the giv starts the reversed loop with. */
4910 if (bl->reversed && ! v->replaceable)
4911 loop_iv_add_mult_sink (loop,
4912 extend_value_for_giv (v, bl->initial_value),
4913 v->mult_val, v->add_val, v->dest_reg);
4914 else if (v->final_value)
4915 loop_insn_sink_or_swim (loop,
4916 gen_load_of_final_value (v->dest_reg,
4917 v->final_value));
4919 if (loop_dump_stream)
4921 fprintf (loop_dump_stream, "giv at %d reduced to ",
4922 INSN_UID (v->insn));
4923 print_simple_rtl (loop_dump_stream, v->new_reg);
4924 fprintf (loop_dump_stream, "\n");
4930 static int
4931 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4932 struct loop *loop ATTRIBUTE_UNUSED;
4933 struct iv_class *bl;
4934 struct induction *v;
4935 rtx test_reg;
4937 int add_cost;
4938 int benefit;
4940 benefit = v->benefit;
4941 PUT_MODE (test_reg, v->mode);
4942 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4943 test_reg, test_reg);
4945 /* Reduce benefit if not replaceable, since we will insert a
4946 move-insn to replace the insn that calculates this giv. Don't do
4947 this unless the giv is a user variable, since it will often be
4948 marked non-replaceable because of the duplication of the exit
4949 code outside the loop. In such a case, the copies we insert are
4950 dead and will be deleted. So they don't have a cost. Similar
4951 situations exist. */
4952 /* ??? The new final_[bg]iv_value code does a much better job of
4953 finding replaceable giv's, and hence this code may no longer be
4954 necessary. */
4955 if (! v->replaceable && ! bl->eliminable
4956 && REG_USERVAR_P (v->dest_reg))
4957 benefit -= copy_cost;
4959 /* Decrease the benefit to count the add-insns that we will insert
4960 to increment the reduced reg for the giv. ??? This can
4961 overestimate the run-time cost of the additional insns, e.g. if
4962 there are multiple basic blocks that increment the biv, but only
4963 one of these blocks is executed during each iteration. There is
4964 no good way to detect cases like this with the current structure
4965 of the loop optimizer. This code is more accurate for
4966 determining code size than run-time benefits. */
4967 benefit -= add_cost * bl->biv_count;
4969 /* Decide whether to strength-reduce this giv or to leave the code
4970 unchanged (recompute it from the biv each time it is used). This
4971 decision can be made independently for each giv. */
4973 #ifdef AUTO_INC_DEC
4974 /* Attempt to guess whether autoincrement will handle some of the
4975 new add insns; if so, increase BENEFIT (undo the subtraction of
4976 add_cost that was done above). */
4977 if (v->giv_type == DEST_ADDR
4978 /* Increasing the benefit is risky, since this is only a guess.
4979 Avoid increasing register pressure in cases where there would
4980 be no other benefit from reducing this giv. */
4981 && benefit > 0
4982 && GET_CODE (v->mult_val) == CONST_INT)
4984 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4986 if (HAVE_POST_INCREMENT
4987 && INTVAL (v->mult_val) == size)
4988 benefit += add_cost * bl->biv_count;
4989 else if (HAVE_PRE_INCREMENT
4990 && INTVAL (v->mult_val) == size)
4991 benefit += add_cost * bl->biv_count;
4992 else if (HAVE_POST_DECREMENT
4993 && -INTVAL (v->mult_val) == size)
4994 benefit += add_cost * bl->biv_count;
4995 else if (HAVE_PRE_DECREMENT
4996 && -INTVAL (v->mult_val) == size)
4997 benefit += add_cost * bl->biv_count;
4999 #endif
5001 return benefit;
5005 /* Free IV structures for LOOP. */
5007 static void
5008 loop_ivs_free (loop)
5009 struct loop *loop;
5011 struct loop_ivs *ivs = LOOP_IVS (loop);
5012 struct iv_class *iv = ivs->list;
5014 free (ivs->regs);
5016 while (iv)
5018 struct iv_class *next = iv->next;
5019 struct induction *induction;
5020 struct induction *next_induction;
5022 for (induction = iv->biv; induction; induction = next_induction)
5024 next_induction = induction->next_iv;
5025 free (induction);
5027 for (induction = iv->giv; induction; induction = next_induction)
5029 next_induction = induction->next_iv;
5030 free (induction);
5033 free (iv);
5034 iv = next;
5039 /* Perform strength reduction and induction variable elimination.
5041 Pseudo registers created during this function will be beyond the
5042 last valid index in several tables including
5043 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5044 problem here, because the added registers cannot be givs outside of
5045 their loop, and hence will never be reconsidered. But scan_loop
5046 must check regnos to make sure they are in bounds. */
5048 static void
5049 strength_reduce (loop, flags)
5050 struct loop *loop;
5051 int flags;
5053 struct loop_info *loop_info = LOOP_INFO (loop);
5054 struct loop_regs *regs = LOOP_REGS (loop);
5055 struct loop_ivs *ivs = LOOP_IVS (loop);
5056 rtx p;
5057 /* Temporary list pointer for traversing ivs->list. */
5058 struct iv_class *bl;
5059 /* Ratio of extra register life span we can justify
5060 for saving an instruction. More if loop doesn't call subroutines
5061 since in that case saving an insn makes more difference
5062 and more registers are available. */
5063 /* ??? could set this to last value of threshold in move_movables */
5064 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5065 /* Map of pseudo-register replacements. */
5066 rtx *reg_map = NULL;
5067 int reg_map_size;
5068 int unrolled_insn_copies = 0;
5069 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5070 int insn_count = count_insns_in_loop (loop);
5072 addr_placeholder = gen_reg_rtx (Pmode);
5074 ivs->n_regs = max_reg_before_loop;
5075 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5077 /* Find all BIVs in loop. */
5078 loop_bivs_find (loop);
5080 /* Exit if there are no bivs. */
5081 if (! ivs->list)
5083 /* Can still unroll the loop anyways, but indicate that there is no
5084 strength reduction info available. */
5085 if (flags & LOOP_UNROLL)
5086 unroll_loop (loop, insn_count, 0);
5088 loop_ivs_free (loop);
5089 return;
5092 /* Determine how BIVs are initialized by looking through the pre-header
5093 extended basic block. */
5094 loop_bivs_init_find (loop);
5096 /* Look at each biv and see if we can say anything better about its
5097 initial value from any initializing insns set up above. */
5098 loop_bivs_check (loop);
5100 /* Search the loop for general induction variables. */
5101 loop_givs_find (loop);
5103 /* Try to calculate and save the number of loop iterations. This is
5104 set to zero if the actual number can not be calculated. This must
5105 be called after all giv's have been identified, since otherwise it may
5106 fail if the iteration variable is a giv. */
5107 loop_iterations (loop);
5109 #ifdef HAVE_prefetch
5110 if (flags & LOOP_PREFETCH)
5111 emit_prefetch_instructions (loop);
5112 #endif
5114 /* Now for each giv for which we still don't know whether or not it is
5115 replaceable, check to see if it is replaceable because its final value
5116 can be calculated. This must be done after loop_iterations is called,
5117 so that final_giv_value will work correctly. */
5118 loop_givs_check (loop);
5120 /* Try to prove that the loop counter variable (if any) is always
5121 nonnegative; if so, record that fact with a REG_NONNEG note
5122 so that "decrement and branch until zero" insn can be used. */
5123 check_dbra_loop (loop, insn_count);
5125 /* Create reg_map to hold substitutions for replaceable giv regs.
5126 Some givs might have been made from biv increments, so look at
5127 ivs->reg_iv_type for a suitable size. */
5128 reg_map_size = ivs->n_regs;
5129 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5131 /* Examine each iv class for feasibility of strength reduction/induction
5132 variable elimination. */
5134 for (bl = ivs->list; bl; bl = bl->next)
5136 struct induction *v;
5137 int benefit;
5139 /* Test whether it will be possible to eliminate this biv
5140 provided all givs are reduced. */
5141 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5143 /* This will be true at the end, if all givs which depend on this
5144 biv have been strength reduced.
5145 We can't (currently) eliminate the biv unless this is so. */
5146 bl->all_reduced = 1;
5148 /* Check each extension dependent giv in this class to see if its
5149 root biv is safe from wrapping in the interior mode. */
5150 check_ext_dependent_givs (bl, loop_info);
5152 /* Combine all giv's for this iv_class. */
5153 combine_givs (regs, bl);
5155 for (v = bl->giv; v; v = v->next_iv)
5157 struct induction *tv;
5159 if (v->ignore || v->same)
5160 continue;
5162 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5164 /* If an insn is not to be strength reduced, then set its ignore
5165 flag, and clear bl->all_reduced. */
5167 /* A giv that depends on a reversed biv must be reduced if it is
5168 used after the loop exit, otherwise, it would have the wrong
5169 value after the loop exit. To make it simple, just reduce all
5170 of such giv's whether or not we know they are used after the loop
5171 exit. */
5173 if (! flag_reduce_all_givs
5174 && v->lifetime * threshold * benefit < insn_count
5175 && ! bl->reversed)
5177 if (loop_dump_stream)
5178 fprintf (loop_dump_stream,
5179 "giv of insn %d not worth while, %d vs %d.\n",
5180 INSN_UID (v->insn),
5181 v->lifetime * threshold * benefit, insn_count);
5182 v->ignore = 1;
5183 bl->all_reduced = 0;
5185 else
5187 /* Check that we can increment the reduced giv without a
5188 multiply insn. If not, reject it. */
5190 for (tv = bl->biv; tv; tv = tv->next_iv)
5191 if (tv->mult_val == const1_rtx
5192 && ! product_cheap_p (tv->add_val, v->mult_val))
5194 if (loop_dump_stream)
5195 fprintf (loop_dump_stream,
5196 "giv of insn %d: would need a multiply.\n",
5197 INSN_UID (v->insn));
5198 v->ignore = 1;
5199 bl->all_reduced = 0;
5200 break;
5205 /* Check for givs whose first use is their definition and whose
5206 last use is the definition of another giv. If so, it is likely
5207 dead and should not be used to derive another giv nor to
5208 eliminate a biv. */
5209 loop_givs_dead_check (loop, bl);
5211 /* Reduce each giv that we decided to reduce. */
5212 loop_givs_reduce (loop, bl);
5214 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5215 as not reduced.
5217 For each giv register that can be reduced now: if replaceable,
5218 substitute reduced reg wherever the old giv occurs;
5219 else add new move insn "giv_reg = reduced_reg". */
5220 loop_givs_rescan (loop, bl, reg_map);
5222 /* All the givs based on the biv bl have been reduced if they
5223 merit it. */
5225 /* For each giv not marked as maybe dead that has been combined with a
5226 second giv, clear any "maybe dead" mark on that second giv.
5227 v->new_reg will either be or refer to the register of the giv it
5228 combined with.
5230 Doing this clearing avoids problems in biv elimination where
5231 a giv's new_reg is a complex value that can't be put in the
5232 insn but the giv combined with (with a reg as new_reg) is
5233 marked maybe_dead. Since the register will be used in either
5234 case, we'd prefer it be used from the simpler giv. */
5236 for (v = bl->giv; v; v = v->next_iv)
5237 if (! v->maybe_dead && v->same)
5238 v->same->maybe_dead = 0;
5240 /* Try to eliminate the biv, if it is a candidate.
5241 This won't work if ! bl->all_reduced,
5242 since the givs we planned to use might not have been reduced.
5244 We have to be careful that we didn't initially think we could
5245 eliminate this biv because of a giv that we now think may be
5246 dead and shouldn't be used as a biv replacement.
5248 Also, there is the possibility that we may have a giv that looks
5249 like it can be used to eliminate a biv, but the resulting insn
5250 isn't valid. This can happen, for example, on the 88k, where a
5251 JUMP_INSN can compare a register only with zero. Attempts to
5252 replace it with a compare with a constant will fail.
5254 Note that in cases where this call fails, we may have replaced some
5255 of the occurrences of the biv with a giv, but no harm was done in
5256 doing so in the rare cases where it can occur. */
5258 if (bl->all_reduced == 1 && bl->eliminable
5259 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5261 /* ?? If we created a new test to bypass the loop entirely,
5262 or otherwise drop straight in, based on this test, then
5263 we might want to rewrite it also. This way some later
5264 pass has more hope of removing the initialization of this
5265 biv entirely. */
5267 /* If final_value != 0, then the biv may be used after loop end
5268 and we must emit an insn to set it just in case.
5270 Reversed bivs already have an insn after the loop setting their
5271 value, so we don't need another one. We can't calculate the
5272 proper final value for such a biv here anyway. */
5273 if (bl->final_value && ! bl->reversed)
5274 loop_insn_sink_or_swim (loop,
5275 gen_load_of_final_value (bl->biv->dest_reg,
5276 bl->final_value));
5278 if (loop_dump_stream)
5279 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5280 bl->regno);
5282 /* See above note wrt final_value. But since we couldn't eliminate
5283 the biv, we must set the value after the loop instead of before. */
5284 else if (bl->final_value && ! bl->reversed)
5285 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5286 bl->final_value));
5289 /* Go through all the instructions in the loop, making all the
5290 register substitutions scheduled in REG_MAP. */
5292 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5293 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5294 || GET_CODE (p) == CALL_INSN)
5296 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5297 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5298 INSN_CODE (p) = -1;
5301 if (loop_info->n_iterations > 0)
5303 /* When we completely unroll a loop we will likely not need the increment
5304 of the loop BIV and we will not need the conditional branch at the
5305 end of the loop. */
5306 unrolled_insn_copies = insn_count - 2;
5308 #ifdef HAVE_cc0
5309 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5310 need the comparison before the conditional branch at the end of the
5311 loop. */
5312 unrolled_insn_copies -= 1;
5313 #endif
5315 /* We'll need one copy for each loop iteration. */
5316 unrolled_insn_copies *= loop_info->n_iterations;
5318 /* A little slop to account for the ability to remove initialization
5319 code, better CSE, and other secondary benefits of completely
5320 unrolling some loops. */
5321 unrolled_insn_copies -= 1;
5323 /* Clamp the value. */
5324 if (unrolled_insn_copies < 0)
5325 unrolled_insn_copies = 0;
5328 /* Unroll loops from within strength reduction so that we can use the
5329 induction variable information that strength_reduce has already
5330 collected. Always unroll loops that would be as small or smaller
5331 unrolled than when rolled. */
5332 if ((flags & LOOP_UNROLL)
5333 || ((flags & LOOP_AUTO_UNROLL)
5334 && loop_info->n_iterations > 0
5335 && unrolled_insn_copies <= insn_count))
5336 unroll_loop (loop, insn_count, 1);
5338 #ifdef HAVE_doloop_end
5339 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5340 doloop_optimize (loop);
5341 #endif /* HAVE_doloop_end */
5343 /* If the number of iterations is known, put a branch prediction note
5344 on the branch. Do that only in the second loop pass, as loop unrolling
5345 may change the number of iterations performed. */
5346 if (flags & LOOP_BCT)
5348 unsigned HOST_WIDE_INT n
5349 = loop_info->n_iterations / loop_info->unroll_number;
5350 if (n > 1)
5351 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
5352 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5355 if (loop_dump_stream)
5356 fprintf (loop_dump_stream, "\n");
5358 loop_ivs_free (loop);
5359 if (reg_map)
5360 free (reg_map);
5363 /* Record all basic induction variables calculated in the insn. */
5364 static rtx
5365 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5366 struct loop *loop;
5367 rtx p;
5368 int not_every_iteration;
5369 int maybe_multiple;
5371 struct loop_ivs *ivs = LOOP_IVS (loop);
5372 rtx set;
5373 rtx dest_reg;
5374 rtx inc_val;
5375 rtx mult_val;
5376 rtx *location;
5378 if (GET_CODE (p) == INSN
5379 && (set = single_set (p))
5380 && GET_CODE (SET_DEST (set)) == REG)
5382 dest_reg = SET_DEST (set);
5383 if (REGNO (dest_reg) < max_reg_before_loop
5384 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5385 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5387 if (basic_induction_var (loop, SET_SRC (set),
5388 GET_MODE (SET_SRC (set)),
5389 dest_reg, p, &inc_val, &mult_val,
5390 &location))
5392 /* It is a possible basic induction variable.
5393 Create and initialize an induction structure for it. */
5395 struct induction *v
5396 = (struct induction *) xmalloc (sizeof (struct induction));
5398 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5399 not_every_iteration, maybe_multiple);
5400 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5402 else if (REGNO (dest_reg) < ivs->n_regs)
5403 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5406 return p;
5409 /* Record all givs calculated in the insn.
5410 A register is a giv if: it is only set once, it is a function of a
5411 biv and a constant (or invariant), and it is not a biv. */
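/* E.g. (illustrative only): if `i' is a biv, an insn performing

       p = i * 4 + base;

   makes `p' a DEST_REG giv with src_reg `i', mult_val (const_int 4)
   and add_val `base'.  */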
5412 static rtx
5413 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5414 struct loop *loop;
5415 rtx p;
5416 int not_every_iteration;
5417 int maybe_multiple;
5419 struct loop_regs *regs = LOOP_REGS (loop);
5421 rtx set;
5422 /* Look for a general induction variable in a register. */
5423 if (GET_CODE (p) == INSN
5424 && (set = single_set (p))
5425 && GET_CODE (SET_DEST (set)) == REG
5426 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5428 rtx src_reg;
5429 rtx dest_reg;
5430 rtx add_val;
5431 rtx mult_val;
5432 rtx ext_val;
5433 int benefit;
5434 rtx regnote = 0;
5435 rtx last_consec_insn;
5437 dest_reg = SET_DEST (set);
5438 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5439 return p;
5441 if (/* SET_SRC is a giv. */
5442 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5443 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5444 /* Equivalent expression is a giv. */
5445 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5446 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5447 &add_val, &mult_val, &ext_val, 0,
5448 &benefit, VOIDmode)))
5449 /* Don't try to handle any regs made by loop optimization.
5450 We have nothing on them in regno_first_uid, etc. */
5451 && REGNO (dest_reg) < max_reg_before_loop
5452 /* Don't recognize a BASIC_INDUCT_VAR here. */
5453 && dest_reg != src_reg
5454 /* This must be the only place where the register is set. */
5455 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5456 /* or all sets must be consecutive and make a giv. */
5457 || (benefit = consec_sets_giv (loop, benefit, p,
5458 src_reg, dest_reg,
5459 &add_val, &mult_val, &ext_val,
5460 &last_consec_insn))))
5462 struct induction *v
5463 = (struct induction *) xmalloc (sizeof (struct induction));
5465 /* If this is a library call, increase benefit. */
5466 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5467 benefit += libcall_benefit (p);
5469 /* Skip the consecutive insns, if there are any. */
5470 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5471 p = last_consec_insn;
5473 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5474 ext_val, benefit, DEST_REG, not_every_iteration,
5475 maybe_multiple, (rtx*) 0);
5480 #ifndef DONT_REDUCE_ADDR
5481 /* Look for givs which are memory addresses. */
5482 /* This resulted in worse code on a VAX 8600. I wonder if it
5483 still does. */
5484 if (GET_CODE (p) == INSN)
5485 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5486 maybe_multiple);
5487 #endif
5489 /* Update the status of whether giv can derive other givs. This can
5490 change when we pass a label or an insn that updates a biv. */
5491 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5492 || GET_CODE (p) == CODE_LABEL)
5493 update_giv_derive (loop, p);
5494 return p;
5497 /* Return 1 if X is a valid source for an initial value (or as a value being
5498 compared against in an initial test).
5500 X must be either a register or constant and must not be clobbered between
5501 the current insn and the start of the loop.
5503 INSN is the insn containing X. */
5505 static int
5506 valid_initial_value_p (x, insn, call_seen, loop_start)
5507 rtx x;
5508 rtx insn;
5509 int call_seen;
5510 rtx loop_start;
5512 if (CONSTANT_P (x))
5513 return 1;
5515 /* Only consider pseudos we know about initialized in insns whose luids
5516 we know. */
5517 if (GET_CODE (x) != REG
5518 || REGNO (x) >= max_reg_before_loop)
5519 return 0;
5521 /* Don't use a call-clobbered register across a call which clobbers it. On
5522 some machines, don't use any hard registers at all. */
5523 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5524 && (SMALL_REGISTER_CLASSES
5525 || (call_used_regs[REGNO (x)] && call_seen)))
5526 return 0;
5528 /* Don't use registers that have been clobbered before the start of the
5529 loop. */
5530 if (reg_set_between_p (x, insn, loop_start))
5531 return 0;
5533 return 1;
5536 /* Scan X for memory refs and check each memory address
5537 as a possible giv. INSN is the insn whose pattern X comes from.
5538 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5539 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5540 more than once in each loop iteration. */
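/* Illustrative aside, not part of the original source: for a reference
   a[i] with 4-byte elements, the insn pattern contains something like
   (mem:SI (plus:SI (mult:SI (reg i) (const_int 4)) (reg a))), and the
   address inside the MEM is recorded as a DEST_ADDR giv with
   src_reg == (reg i), mult_val == (const_int 4) and add_val == (reg a). */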
5542 static void
5543 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5544 const struct loop *loop;
5545 rtx x;
5546 rtx insn;
5547 int not_every_iteration, maybe_multiple;
5549 int i, j;
5550 enum rtx_code code;
5551 const char *fmt;
5553 if (x == 0)
5554 return;
5556 code = GET_CODE (x);
5557 switch (code)
5559 case REG:
5560 case CONST_INT:
5561 case CONST:
5562 case CONST_DOUBLE:
5563 case SYMBOL_REF:
5564 case LABEL_REF:
5565 case PC:
5566 case CC0:
5567 case ADDR_VEC:
5568 case ADDR_DIFF_VEC:
5569 case USE:
5570 case CLOBBER:
5571 return;
5573 case MEM:
5575 rtx src_reg;
5576 rtx add_val;
5577 rtx mult_val;
5578 rtx ext_val;
5579 int benefit;
5581 /* This code used to disable creating GIVs with mult_val == 1 and
5582 add_val == 0. However, this leads to lost optimizations when
5583 it comes time to combine a set of related DEST_ADDR GIVs, since
5584 this one would not be seen. */
5586 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5587 &mult_val, &ext_val, 1, &benefit,
5588 GET_MODE (x)))
5590 /* Found one; record it. */
5591 struct induction *v
5592 = (struct induction *) xmalloc (sizeof (struct induction));
5594 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5595 add_val, ext_val, benefit, DEST_ADDR,
5596 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5598 v->mem = x;
5601 return;
5603 default:
5604 break;
5607 /* Recursively scan the subexpressions for other mem refs. */
5609 fmt = GET_RTX_FORMAT (code);
5610 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5611 if (fmt[i] == 'e')
5612 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5613 maybe_multiple);
5614 else if (fmt[i] == 'E')
5615 for (j = 0; j < XVECLEN (x, i); j++)
5616 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5617 maybe_multiple);
5620 /* Fill in the data about one biv update.
5621 V is the `struct induction' in which we record the biv. (It is
5622 allocated by the caller, with xmalloc.)
5623 INSN is the insn that sets it.
5624 DEST_REG is the biv's reg.
5626 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5627 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5628 being set to INC_VAL.
5630 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5631 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5632 can be executed more than once per iteration. If MAYBE_MULTIPLE
5633 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5634 executed exactly once per iteration. */
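/* Illustrative aside, not part of the original source: for an insn
   doing i = i + 4 we are called with MULT_VAL == const1_rtx and
   INC_VAL == (const_int 4); for an insn doing i = 10, which sets the
   biv outright, we are called with MULT_VAL == const0_rtx and
   INC_VAL == (const_int 10). */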
5636 static void
5637 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5638 not_every_iteration, maybe_multiple)
5639 struct loop *loop;
5640 struct induction *v;
5641 rtx insn;
5642 rtx dest_reg;
5643 rtx inc_val;
5644 rtx mult_val;
5645 rtx *location;
5646 int not_every_iteration;
5647 int maybe_multiple;
5649 struct loop_ivs *ivs = LOOP_IVS (loop);
5650 struct iv_class *bl;
5652 v->insn = insn;
5653 v->src_reg = dest_reg;
5654 v->dest_reg = dest_reg;
5655 v->mult_val = mult_val;
5656 v->add_val = inc_val;
5657 v->ext_dependent = NULL_RTX;
5658 v->location = location;
5659 v->mode = GET_MODE (dest_reg);
5660 v->always_computable = ! not_every_iteration;
5661 v->always_executed = ! not_every_iteration;
5662 v->maybe_multiple = maybe_multiple;
5664 /* Add this to the reg's iv_class, creating a class
5665 if this is the first incrementation of the reg. */
5667 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5668 if (bl == 0)
5670 /* Create and initialize new iv_class. */
5672 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5674 bl->regno = REGNO (dest_reg);
5675 bl->biv = 0;
5676 bl->giv = 0;
5677 bl->biv_count = 0;
5678 bl->giv_count = 0;
5680 /* Set initial value to the reg itself. */
5681 bl->initial_value = dest_reg;
5682 bl->final_value = 0;
5683 /* We haven't seen the initializing insn yet. */
5684 bl->init_insn = 0;
5685 bl->init_set = 0;
5686 bl->initial_test = 0;
5687 bl->incremented = 0;
5688 bl->eliminable = 0;
5689 bl->nonneg = 0;
5690 bl->reversed = 0;
5691 bl->total_benefit = 0;
5693 /* Add this class to ivs->list. */
5694 bl->next = ivs->list;
5695 ivs->list = bl;
5697 /* Put it in the array of biv register classes. */
5698 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5701 /* Update IV_CLASS entry for this biv. */
5702 v->next_iv = bl->biv;
5703 bl->biv = v;
5704 bl->biv_count++;
5705 if (mult_val == const1_rtx)
5706 bl->incremented = 1;
5708 if (loop_dump_stream)
5709 loop_biv_dump (v, loop_dump_stream, 0);
5712 /* Fill in the data about one giv.
5713 V is the `struct induction' in which we record the giv. (It is
5714 allocated by the caller, with xmalloc.)
5715 INSN is the insn that sets it.
5716 BENEFIT estimates the savings from deleting this insn.
5717 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5718 into a register or is used as a memory address.
5720 SRC_REG is the biv reg which the giv is computed from.
5721 DEST_REG is the giv's reg (if the giv is stored in a reg).
5722 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5723 LOCATION points to the place where this giv's value appears in INSN. */
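/* Illustrative aside, not part of the original source: for an insn
   computing p = i * 4 + base, where `i' is a biv, we are called with
   SRC_REG == (reg i), MULT_VAL == (const_int 4), ADD_VAL == base and
   TYPE == DEST_REG. */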
5725 static void
5726 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5727 benefit, type, not_every_iteration, maybe_multiple, location)
5728 const struct loop *loop;
5729 struct induction *v;
5730 rtx insn;
5731 rtx src_reg;
5732 rtx dest_reg;
5733 rtx mult_val, add_val, ext_val;
5734 int benefit;
5735 enum g_types type;
5736 int not_every_iteration, maybe_multiple;
5737 rtx *location;
5739 struct loop_ivs *ivs = LOOP_IVS (loop);
5740 struct induction *b;
5741 struct iv_class *bl;
5742 rtx set = single_set (insn);
5743 rtx temp;
5745 /* Attempt to prove constancy of the values. Don't let simplify_rtx
5746 undo the MULT canonicalization that we performed earlier. */
5747 temp = simplify_rtx (add_val);
5748 if (temp
5749 && ! (GET_CODE (add_val) == MULT
5750 && GET_CODE (temp) == ASHIFT))
5751 add_val = temp;
5753 v->insn = insn;
5754 v->src_reg = src_reg;
5755 v->giv_type = type;
5756 v->dest_reg = dest_reg;
5757 v->mult_val = mult_val;
5758 v->add_val = add_val;
5759 v->ext_dependent = ext_val;
5760 v->benefit = benefit;
5761 v->location = location;
5762 v->cant_derive = 0;
5763 v->combined_with = 0;
5764 v->maybe_multiple = maybe_multiple;
5765 v->maybe_dead = 0;
5766 v->derive_adjustment = 0;
5767 v->same = 0;
5768 v->ignore = 0;
5769 v->new_reg = 0;
5770 v->final_value = 0;
5771 v->same_insn = 0;
5772 v->auto_inc_opt = 0;
5773 v->unrolled = 0;
5774 v->shared = 0;
5776 /* The v->always_computable field is used in update_giv_derive, to
5777 determine whether a giv can be used to derive another giv. For a
5778 DEST_REG giv, INSN computes a new value for the giv, so its value
5779 isn't computable if INSN isn't executed every iteration.
5780 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5781 it does not compute a new value. Hence the value is always computable
5782 regardless of whether INSN is executed each iteration. */
5784 if (type == DEST_ADDR)
5785 v->always_computable = 1;
5786 else
5787 v->always_computable = ! not_every_iteration;
5789 v->always_executed = ! not_every_iteration;
5791 if (type == DEST_ADDR)
5793 v->mode = GET_MODE (*location);
5794 v->lifetime = 1;
5796 else /* type == DEST_REG */
5798 v->mode = GET_MODE (SET_DEST (set));
5800 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5802 /* If the lifetime is zero, it means that this register is
5803 really a dead store. So mark this as a giv that can be
5804 ignored. This will not prevent the biv from being eliminated. */
5805 if (v->lifetime == 0)
5806 v->ignore = 1;
5808 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5809 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5812 /* Add the giv to the class of givs computed from one biv. */
5814 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5815 if (bl)
5817 v->next_iv = bl->giv;
5818 bl->giv = v;
5819 /* Don't count DEST_ADDR. This is supposed to count the number of
5820 insns that calculate givs. */
5821 if (type == DEST_REG)
5822 bl->giv_count++;
5823 bl->total_benefit += benefit;
5825 else
5826 /* Fatal error, biv missing for this giv? */
5827 abort ();
5829 if (type == DEST_ADDR)
5830 v->replaceable = 1;
5831 else
5833 /* The giv can be replaced outright by the reduced register only if all
5834 of the following conditions are true:
5835 - the insn that sets the giv is always executed on any iteration
5836 on which the giv is used at all
5837 (there are two ways to deduce this:
5838 either the insn is executed on every iteration,
5839 or all uses follow that insn in the same basic block),
5840 - the giv is not used outside the loop
5841 - no assignments to the biv occur during the giv's lifetime. */
5843 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5844 /* Previous line always fails if INSN was moved by loop opt. */
5845 && REGNO_LAST_LUID (REGNO (dest_reg))
5846 < INSN_LUID (loop->end)
5847 && (! not_every_iteration
5848 || last_use_this_basic_block (dest_reg, insn)))
5850 /* Now check that there are no assignments to the biv within the
5851 giv's lifetime. This requires two separate checks. */
5853 /* Check each biv update, and fail if any are between the first
5854 and last use of the giv.
5856 If this loop contains an inner loop that was unrolled, then
5857 the insn modifying the biv may have been emitted by the loop
5858 unrolling code, and hence does not have a valid luid. Just
5859 mark the giv as not replaceable in this case. It is not very
5860 useful as a biv, because it is used in two different loops.
5861 It is very unlikely that we would be able to optimize the giv
5862 using this biv anyway. */
5864 v->replaceable = 1;
5865 for (b = bl->biv; b; b = b->next_iv)
5867 if (INSN_UID (b->insn) >= max_uid_for_loop
5868 || ((INSN_LUID (b->insn)
5869 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5870 && (INSN_LUID (b->insn)
5871 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5873 v->replaceable = 0;
5874 v->not_replaceable = 1;
5875 break;
5879 /* If there are any backwards branches that go from after the
5880 biv update to before it, then this giv is not replaceable. */
5881 if (v->replaceable)
5882 for (b = bl->biv; b; b = b->next_iv)
5883 if (back_branch_in_range_p (loop, b->insn))
5885 v->replaceable = 0;
5886 v->not_replaceable = 1;
5887 break;
5890 else
5892 /* May still be replaceable, we don't have enough info here to
5893 decide. */
5894 v->replaceable = 0;
5895 v->not_replaceable = 0;
5899 /* Record whether the add_val contains a const_int, for later use by
5900 combine_givs. */
5902 rtx tem = add_val;
5904 v->no_const_addval = 1;
5905 if (tem == const0_rtx)
5907 else if (CONSTANT_P (add_val))
5908 v->no_const_addval = 0;
5909 if (GET_CODE (tem) == PLUS)
5911 while (1)
5913 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5914 tem = XEXP (tem, 0);
5915 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5916 tem = XEXP (tem, 1);
5917 else
5918 break;
5920 if (CONSTANT_P (XEXP (tem, 1)))
5921 v->no_const_addval = 0;
5925 if (loop_dump_stream)
5926 loop_giv_dump (v, loop_dump_stream, 0);
5929 /* All this does is determine whether a giv can be made replaceable because
5930 its final value can be calculated. This code cannot be part of record_giv
5931 above, because final_giv_value requires that the number of loop iterations
5932 be known, and that cannot be accurately calculated until after all givs
5933 have been identified. */
5935 static void
5936 check_final_value (loop, v)
5937 const struct loop *loop;
5938 struct induction *v;
5940 struct loop_ivs *ivs = LOOP_IVS (loop);
5941 struct iv_class *bl;
5942 rtx final_value = 0;
5944 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5946 /* DEST_ADDR givs will never reach here, because they are always marked
5947 replaceable above in record_giv. */
5949 /* The giv can be replaced outright by the reduced register only if all
5950 of the following conditions are true:
5951 - the insn that sets the giv is always executed on any iteration
5952 on which the giv is used at all
5953 (there are two ways to deduce this:
5954 either the insn is executed on every iteration,
5955 or all uses follow that insn in the same basic block),
5956 - its final value can be calculated (this condition is different
5957 from the one above in record_giv)
5958 - it's not used before it's set
5959 - no assignments to the biv occur during the giv's lifetime. */
5961 #if 0
5962 /* This is only called now when replaceable is known to be false. */
5963 /* Clear replaceable, so that it won't confuse final_giv_value. */
5964 v->replaceable = 0;
5965 #endif
5967 if ((final_value = final_giv_value (loop, v))
5968 && (v->always_executed
5969 || last_use_this_basic_block (v->dest_reg, v->insn)))
5971 int biv_increment_seen = 0, before_giv_insn = 0;
5972 rtx p = v->insn;
5973 rtx last_giv_use;
5975 v->replaceable = 1;
5977 /* When trying to determine whether or not a biv increment occurs
5978 during the lifetime of the giv, we can ignore uses of the variable
5979 outside the loop because final_value is true. Hence we cannot
5980 use regno_last_uid and regno_first_uid as above in record_giv. */
5982 /* Search the loop to determine whether any assignments to the
5983 biv occur during the giv's lifetime. Start with the insn
5984 that sets the giv, and search around the loop until we come
5985 back to that insn again.
5987 Also fail if there is a jump within the giv's lifetime that jumps
5988 to somewhere outside the lifetime but still within the loop. This
5989 catches spaghetti code where the execution order is not linear, and
5990 hence the above test fails. Here we assume that the giv lifetime
5991 does not extend from one iteration of the loop to the next, so as
5992 to make the test easier. Since the lifetime isn't known yet,
5993 this requires two loops. See also record_giv above. */
5995 last_giv_use = v->insn;
5997 while (1)
5999 p = NEXT_INSN (p);
6000 if (p == loop->end)
6002 before_giv_insn = 1;
6003 p = NEXT_INSN (loop->start);
6005 if (p == v->insn)
6006 break;
6008 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
6009 || GET_CODE (p) == CALL_INSN)
6011 /* It is possible for the BIV increment to use the GIV if we
6012 have a cycle. Thus we must be sure to check each insn for
6013 both BIV and GIV uses, and we must check for BIV uses
6014 first. */
6016 if (! biv_increment_seen
6017 && reg_set_p (v->src_reg, PATTERN (p)))
6018 biv_increment_seen = 1;
6020 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6022 if (biv_increment_seen || before_giv_insn)
6024 v->replaceable = 0;
6025 v->not_replaceable = 1;
6026 break;
6028 last_giv_use = p;
6033 /* Now that the lifetime of the giv is known, check for branches
6034 from within the lifetime to outside the lifetime if it is still
6035 replaceable. */
6037 if (v->replaceable)
6039 p = v->insn;
6040 while (1)
6042 p = NEXT_INSN (p);
6043 if (p == loop->end)
6044 p = NEXT_INSN (loop->start);
6045 if (p == last_giv_use)
6046 break;
6048 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6049 && LABEL_NAME (JUMP_LABEL (p))
6050 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6051 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6052 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6053 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6055 v->replaceable = 0;
6056 v->not_replaceable = 1;
6058 if (loop_dump_stream)
6059 fprintf (loop_dump_stream,
6060 "Found branch outside giv lifetime.\n");
6062 break;
6067 /* If it is replaceable, then save the final value. */
6068 if (v->replaceable)
6069 v->final_value = final_value;
6072 if (loop_dump_stream && v->replaceable)
6073 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6074 INSN_UID (v->insn), REGNO (v->dest_reg));
6077 /* Update the status of whether a giv can derive other givs.
6079 We need to do something special if there is or may be an update to the biv
6080 between the time the giv is defined and the time it is used to derive
6081 another giv.
6083 In addition, a giv that is only conditionally set is not allowed to
6084 derive another giv once a label has been passed.
6086 The cases we look at are when a label or an update to a biv is passed. */
6088 static void
6089 update_giv_derive (loop, p)
6090 const struct loop *loop;
6091 rtx p;
6093 struct loop_ivs *ivs = LOOP_IVS (loop);
6094 struct iv_class *bl;
6095 struct induction *biv, *giv;
6096 rtx tem;
6097 int dummy;
6099 /* Search all IV classes, then all bivs, and finally all givs.
6101 There are three cases we are concerned with. First we have the situation
6102 of a giv that is only updated conditionally. In that case, it may not
6103 derive any givs after a label is passed.
6105 The second case is when a biv update occurs, or may occur, after the
6106 definition of a giv. For certain biv updates (see below) that are
6107 known to occur between the giv definition and use, we can adjust the
6108 giv definition. For others, or when the biv update is conditional,
6109 we must prevent the giv from deriving any other givs. There are two
6110 sub-cases within this case.
6112 If this is a label, we are concerned with any biv update that is done
6113 conditionally, since it may be done after the giv is defined followed by
6114 a branch here (actually, we need to pass both a jump and a label, but
6115 this extra tracking doesn't seem worth it).
6117 If this is a jump, we are concerned about any biv update that may be
6118 executed multiple times. We are actually only concerned about
6119 backward jumps, but it is probably not worth performing the test
6120 on the jump again here.
6122 If this is a biv update, we must adjust the giv status to show that a
6123 subsequent biv update was performed. If this adjustment cannot be done,
6124 the giv cannot derive further givs. */
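/* Illustrative aside, not part of the original source: suppose
   g = 4*b + a is computed and the biv is then incremented by b = b + 2
   in the same iteration. In terms of the updated biv, g equals
   4*b + a - 8, so the product biv->add_val * giv->mult_val (8 here) is
   recorded as the adjustment to subtract when deriving from g. */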
6126 for (bl = ivs->list; bl; bl = bl->next)
6127 for (biv = bl->biv; biv; biv = biv->next_iv)
6128 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6129 || biv->insn == p)
6131 for (giv = bl->giv; giv; giv = giv->next_iv)
6133 /* If cant_derive is already true, there is no point in
6134 checking all of these conditions again. */
6135 if (giv->cant_derive)
6136 continue;
6138 /* If this giv is conditionally set and we have passed a label,
6139 it cannot derive anything. */
6140 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6141 giv->cant_derive = 1;
6143 /* Skip givs that have mult_val == 0, since
6144 they are really invariants. Also skip those that are
6145 replaceable, since we know their lifetime doesn't contain
6146 any biv update. */
6147 else if (giv->mult_val == const0_rtx || giv->replaceable)
6148 continue;
6150 /* The only way we can allow this giv to derive another
6151 is if this is a biv increment and we can form the product
6152 of biv->add_val and giv->mult_val. In this case, we will
6153 be able to compute a compensation. */
6154 else if (biv->insn == p)
6156 rtx ext_val_dummy;
6158 tem = 0;
6159 if (biv->mult_val == const1_rtx)
6160 tem = simplify_giv_expr (loop,
6161 gen_rtx_MULT (giv->mode,
6162 biv->add_val,
6163 giv->mult_val),
6164 &ext_val_dummy, &dummy);
6166 if (tem && giv->derive_adjustment)
6167 tem = simplify_giv_expr
6168 (loop,
6169 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6170 &ext_val_dummy, &dummy);
6172 if (tem)
6173 giv->derive_adjustment = tem;
6174 else
6175 giv->cant_derive = 1;
6177 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6178 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6179 giv->cant_derive = 1;
6184 /* Check whether an insn is an increment legitimate for a basic induction var.
6185 X is the source of insn P, or a part of it.
6186 MODE is the mode in which X should be interpreted.
6188 DEST_REG is the putative biv, also the destination of the insn.
6189 We accept patterns of these forms:
6190 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6191 REG = INVARIANT + REG
6193 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6194 store the additive term into *INC_VAL, and store the place where
6195 we found the additive term into *LOCATION.
6197 If X is an assignment of an invariant into DEST_REG, we set
6198 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6200 We also want to detect a BIV when it corresponds to a variable
6201 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6202 of the variable may be a PLUS that adds a SUBREG of that variable to
6203 an invariant and then sign- or zero-extends the result of the PLUS
6204 into the variable.
6206 Most GIVs in such cases will be in the promoted mode, since that is
6207 probably the natural computation mode (and almost certainly the mode
6208 used for addresses) on the machine. So we view the pseudo-reg containing
6209 the variable as the BIV, as if it were simply incremented.
6211 Note that treating the entire pseudo as a BIV will result in making
6212 simple increments to any GIVs based on it. However, if the variable
6213 overflows in its declared mode but not its promoted mode, the result will
6214 be incorrect. This is acceptable if the variable is signed, since
6215 overflows in such cases are undefined, but not if it is unsigned, since
6216 those overflows are defined. So we only check for SIGN_EXTEND and
6217 not ZERO_EXTEND.
6219 If we cannot find a biv, we return 0. */
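/* Illustrative aside, not part of the original source: for the insn
   (set (reg 42) (plus (reg 42) (const_int 4))) the PLUS case below
   returns 1 with *MULT_VAL == const1_rtx, *INC_VAL == (const_int 4)
   and *LOCATION pointing at the addend; for (set (reg 42) (const_int 0))
   in the innermost loop, the CONST_INT case returns 1 with
   *MULT_VAL == const0_rtx and *INC_VAL == (const_int 0). */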
6221 static int
6222 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6223 const struct loop *loop;
6224 rtx x;
6225 enum machine_mode mode;
6226 rtx dest_reg;
6227 rtx p;
6228 rtx *inc_val;
6229 rtx *mult_val;
6230 rtx **location;
6232 enum rtx_code code;
6233 rtx *argp, arg;
6234 rtx insn, set = 0;
6236 code = GET_CODE (x);
6237 *location = NULL;
6238 switch (code)
6240 case PLUS:
6241 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6242 || (GET_CODE (XEXP (x, 0)) == SUBREG
6243 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6244 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6246 argp = &XEXP (x, 1);
6248 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6249 || (GET_CODE (XEXP (x, 1)) == SUBREG
6250 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6251 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6253 argp = &XEXP (x, 0);
6255 else
6256 return 0;
6258 arg = *argp;
6259 if (loop_invariant_p (loop, arg) != 1)
6260 return 0;
6262 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6263 *mult_val = const1_rtx;
6264 *location = argp;
6265 return 1;
6267 case SUBREG:
6268 /* If what's inside the SUBREG is a BIV, then the SUBREG is one too.
6269 This will handle addition of promoted variables.
6270 ??? The comment at the start of this function is wrong: promoted
6271 variable increments don't look like it says they do. */
6272 return basic_induction_var (loop, SUBREG_REG (x),
6273 GET_MODE (SUBREG_REG (x)),
6274 dest_reg, p, inc_val, mult_val, location);
6276 case REG:
6277 /* If this register is assigned in a previous insn, look at its
6278 source, but don't go outside the loop or past a label. */
6280 /* If this sets a register to itself, we would repeat any previous
6281 biv increment if we applied this strategy blindly. */
6282 if (rtx_equal_p (dest_reg, x))
6283 return 0;
6285 insn = p;
6286 while (1)
6288 rtx dest;
6290 do
6291 insn = PREV_INSN (insn);
6293 while (insn && GET_CODE (insn) == NOTE
6294 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6296 if (!insn)
6297 break;
6298 set = single_set (insn);
6299 if (set == 0)
6300 break;
6301 dest = SET_DEST (set);
6302 if (dest == x
6303 || (GET_CODE (dest) == SUBREG
6304 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6305 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6306 && SUBREG_REG (dest) == x))
6307 return basic_induction_var (loop, SET_SRC (set),
6308 (GET_MODE (SET_SRC (set)) == VOIDmode
6309 ? GET_MODE (x)
6310 : GET_MODE (SET_SRC (set))),
6311 dest_reg, insn,
6312 inc_val, mult_val, location);
6314 while (GET_CODE (dest) == SIGN_EXTRACT
6315 || GET_CODE (dest) == ZERO_EXTRACT
6316 || GET_CODE (dest) == SUBREG
6317 || GET_CODE (dest) == STRICT_LOW_PART)
6318 dest = XEXP (dest, 0);
6319 if (dest == x)
6320 break;
6322 /* Fall through. */
6324 /* Can accept constant setting of biv only when inside innermost loop.
6325 Otherwise, a biv of an inner loop may be incorrectly recognized
6326 as a biv of the outer loop,
6327 causing code to be moved INTO the inner loop. */
6328 case MEM:
6329 if (loop_invariant_p (loop, x) != 1)
6330 return 0;
6331 case CONST_INT:
6332 case SYMBOL_REF:
6333 case CONST:
6334 /* convert_modes aborts if we try to convert to or from CCmode, so just
6335 exclude that case. It is very unlikely that a condition code value
6336 would be a useful iterator anyway. convert_modes also aborts if we try to
6337 convert a float mode to non-float or vice versa. */
6338 if (loop->level == 1
6339 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6340 && GET_MODE_CLASS (mode) != MODE_CC)
6342 /* Possible bug here? Perhaps we don't know the mode of X. */
6343 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6344 *mult_val = const0_rtx;
6345 return 1;
6347 else
6348 return 0;
6350 case SIGN_EXTEND:
6351 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6352 dest_reg, p, inc_val, mult_val, location);
6354 case ASHIFTRT:
6355 /* Similar, since this can be a sign extension. */
6356 for (insn = PREV_INSN (p);
6357 (insn && GET_CODE (insn) == NOTE
6358 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6359 insn = PREV_INSN (insn))
6360 ;
6362 if (insn)
6363 set = single_set (insn);
6365 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6366 && set && SET_DEST (set) == XEXP (x, 0)
6367 && GET_CODE (XEXP (x, 1)) == CONST_INT
6368 && INTVAL (XEXP (x, 1)) >= 0
6369 && GET_CODE (SET_SRC (set)) == ASHIFT
6370 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6371 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6372 GET_MODE (XEXP (x, 0)),
6373 dest_reg, insn, inc_val, mult_val,
6374 location);
6375 return 0;
6377 default:
6378 return 0;
6382 /* A general induction variable (giv) is any quantity that is a linear
6383 function of a basic induction variable,
6384 i.e. giv = biv * mult_val + add_val.
6385 The coefficients can be any loop invariant quantity.
6386 A giv need not be computed directly from the biv;
6387 it can be computed by way of other givs. */
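/* Illustrative aside, not part of the original source: if g1 = 4*i is a
   giv of the biv `i', then g2 = g1 + 16 is also a giv; substituting
   g1's expression yields the canonical form 4*i + 16. */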
6389 /* Determine whether X computes a giv.
6390 If it does, return a nonzero value
6391 which is the benefit from eliminating the computation of X;
6392 set *SRC_REG to the register of the biv that it is computed from;
6393 set *ADD_VAL and *MULT_VAL to the coefficients,
6394 such that the value of X is biv * mult + add; */
6396 static int
6397 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6398 is_addr, pbenefit, addr_mode)
6399 const struct loop *loop;
6400 rtx x;
6401 rtx *src_reg;
6402 rtx *add_val;
6403 rtx *mult_val;
6404 rtx *ext_val;
6405 int is_addr;
6406 int *pbenefit;
6407 enum machine_mode addr_mode;
6409 struct loop_ivs *ivs = LOOP_IVS (loop);
6410 rtx orig_x = x;
6412 /* If this is an invariant, forget it, it isn't a giv. */
6413 if (loop_invariant_p (loop, x) == 1)
6414 return 0;
6416 *pbenefit = 0;
6417 *ext_val = NULL_RTX;
6418 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6419 if (x == 0)
6420 return 0;
6422 switch (GET_CODE (x))
6424 case USE:
6425 case CONST_INT:
6426 /* Since this is now an invariant and wasn't before, it must be a giv
6427 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6428 with. */
6429 *src_reg = ivs->list->biv->dest_reg;
6430 *mult_val = const0_rtx;
6431 *add_val = x;
6432 break;
6434 case REG:
6435 /* This is equivalent to a BIV. */
6436 *src_reg = x;
6437 *mult_val = const1_rtx;
6438 *add_val = const0_rtx;
6439 break;
6441 case PLUS:
6442 /* Either (plus (biv) (invar)) or
6443 (plus (mult (biv) (invar_1)) (invar_2)). */
6444 if (GET_CODE (XEXP (x, 0)) == MULT)
6446 *src_reg = XEXP (XEXP (x, 0), 0);
6447 *mult_val = XEXP (XEXP (x, 0), 1);
6449 else
6451 *src_reg = XEXP (x, 0);
6452 *mult_val = const1_rtx;
6454 *add_val = XEXP (x, 1);
6455 break;
6457 case MULT:
6458 /* ADD_VAL is zero. */
6459 *src_reg = XEXP (x, 0);
6460 *mult_val = XEXP (x, 1);
6461 *add_val = const0_rtx;
6462 break;
6464 default:
6465 abort ();
6468 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6469 one unless they are CONST_INTs). */
6470 if (GET_CODE (*add_val) == USE)
6471 *add_val = XEXP (*add_val, 0);
6472 if (GET_CODE (*mult_val) == USE)
6473 *mult_val = XEXP (*mult_val, 0);
6475 if (is_addr)
6476 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6477 else
6478 *pbenefit += rtx_cost (orig_x, SET);
6480 /* Always return true if this is a giv so it will be detected as such,
6481 even if the benefit is zero or negative. This allows elimination
6482 of bivs that might otherwise not be eliminated. */
6483 return 1;
6486 /* Given an expression, X, try to form it as a linear function of a biv.
6487 We will canonicalize it to be of the form
6488 (plus (mult (BIV) (invar_1))
6489 (invar_2))
6490 with possible degeneracies.
6492 The invariant expressions must each be of a form that can be used as a
6493 machine operand. We surround them with a USE rtx (a hack, but localized
6494 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6495 routine; it is the caller's responsibility to strip them.
6497 If no such canonicalization is possible (i.e., two biv's are used or an
6498 expression that is neither invariant nor a biv or giv), this routine
6499 returns 0.
6501 For a non-zero return, the result will have a code of CONST_INT, USE,
6502 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6504 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
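/* Illustrative aside, not part of the original source: for a biv `b',
   the expression (mult (plus (reg b) (const_int 3)) (const_int 4)) is
   canonicalized by distribution into
   (plus (mult (reg b) (const_int 4)) (const_int 12)),
   i.e. invar_1 == 4 and invar_2 == 12. */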
6506 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6507 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6509 static rtx
6510 simplify_giv_expr (loop, x, ext_val, benefit)
6511 const struct loop *loop;
6512 rtx x;
6513 rtx *ext_val;
6514 int *benefit;
6516 struct loop_ivs *ivs = LOOP_IVS (loop);
6517 struct loop_regs *regs = LOOP_REGS (loop);
6518 enum machine_mode mode = GET_MODE (x);
6519 rtx arg0, arg1;
6520 rtx tem;
6522 /* If this is not an integer mode, or if we cannot do arithmetic in this
6523 mode, this can't be a giv. */
6524 if (mode != VOIDmode
6525 && (GET_MODE_CLASS (mode) != MODE_INT
6526 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6527 return NULL_RTX;
6529 switch (GET_CODE (x))
6531 case PLUS:
6532 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6533 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6534 if (arg0 == 0 || arg1 == 0)
6535 return NULL_RTX;
6537 /* Put constant last, CONST_INT last if both constant. */
6538 if ((GET_CODE (arg0) == USE
6539 || GET_CODE (arg0) == CONST_INT)
6540 && ! ((GET_CODE (arg0) == USE
6541 && GET_CODE (arg1) == USE)
6542 || GET_CODE (arg1) == CONST_INT))
6543 tem = arg0, arg0 = arg1, arg1 = tem;
6545 /* Handle addition of zero, then addition of an invariant. */
6546 if (arg1 == const0_rtx)
6547 return arg0;
6548 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6549 switch (GET_CODE (arg0))
6551 case CONST_INT:
6552 case USE:
6553 /* Adding two invariants must result in an invariant, so enclose
6554 addition operation inside a USE and return it. */
6555 if (GET_CODE (arg0) == USE)
6556 arg0 = XEXP (arg0, 0);
6557 if (GET_CODE (arg1) == USE)
6558 arg1 = XEXP (arg1, 0);
6560 if (GET_CODE (arg0) == CONST_INT)
6561 tem = arg0, arg0 = arg1, arg1 = tem;
6562 if (GET_CODE (arg1) == CONST_INT)
6563 tem = sge_plus_constant (arg0, arg1);
6564 else
6565 tem = sge_plus (mode, arg0, arg1);
6567 if (GET_CODE (tem) != CONST_INT)
6568 tem = gen_rtx_USE (mode, tem);
6569 return tem;
6571 case REG:
6572 case MULT:
6573 /* biv + invar or mult + invar. Return sum. */
6574 return gen_rtx_PLUS (mode, arg0, arg1);
6576 case PLUS:
6577 /* (a + invar_1) + invar_2. Associate. */
6578 return
6579 simplify_giv_expr (loop,
6580 gen_rtx_PLUS (mode,
6581 XEXP (arg0, 0),
6582 gen_rtx_PLUS (mode,
6583 XEXP (arg0, 1),
6584 arg1)),
6585 ext_val, benefit);
6587 default:
6588 abort ();
6591 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6592 MULT to reduce cases. */
6593 if (GET_CODE (arg0) == REG)
6594 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6595 if (GET_CODE (arg1) == REG)
6596 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6598 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6599 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6600 Recurse to associate the second PLUS. */
6601 if (GET_CODE (arg1) == MULT)
6602 tem = arg0, arg0 = arg1, arg1 = tem;
6604 if (GET_CODE (arg1) == PLUS)
6605 return
6606 simplify_giv_expr (loop,
6607 gen_rtx_PLUS (mode,
6608 gen_rtx_PLUS (mode, arg0,
6609 XEXP (arg1, 0)),
6610 XEXP (arg1, 1)),
6611 ext_val, benefit);
6613 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6614 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6615 return NULL_RTX;
6617 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6618 return NULL_RTX;
6620 return simplify_giv_expr (loop,
6621 gen_rtx_MULT (mode,
6622 XEXP (arg0, 0),
6623 gen_rtx_PLUS (mode,
6624 XEXP (arg0, 1),
6625 XEXP (arg1, 1))),
6626 ext_val, benefit);
6628 case MINUS:
6629 /* Handle "a - b" as "a + b * (-1)". */
6630 return simplify_giv_expr (loop,
6631 gen_rtx_PLUS (mode,
6632 XEXP (x, 0),
6633 gen_rtx_MULT (mode,
6634 XEXP (x, 1),
6635 constm1_rtx)),
6636 ext_val, benefit);
6638 case MULT:
6639 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6640 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6641 if (arg0 == 0 || arg1 == 0)
6642 return NULL_RTX;
6644 /* Put constant last, CONST_INT last if both constant. */
6645 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6646 && GET_CODE (arg1) != CONST_INT)
6647 tem = arg0, arg0 = arg1, arg1 = tem;
6649 /* If second argument is not now constant, not giv. */
6650 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6651 return NULL_RTX;
6653 /* Handle multiply by 0 or 1. */
6654 if (arg1 == const0_rtx)
6655 return const0_rtx;
6657 else if (arg1 == const1_rtx)
6658 return arg0;
6660 switch (GET_CODE (arg0))
6662 case REG:
6663 /* biv * invar. Done. */
6664 return gen_rtx_MULT (mode, arg0, arg1);
6666 case CONST_INT:
6667 /* Product of two constants. */
6668 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6670 case USE:
6671 /* invar * invar is a giv, but attempt to simplify it somehow. */
6672 if (GET_CODE (arg1) != CONST_INT)
6673 return NULL_RTX;
6675 arg0 = XEXP (arg0, 0);
6676 if (GET_CODE (arg0) == MULT)
6678 /* (invar_0 * invar_1) * invar_2. Associate. */
6679 return simplify_giv_expr (loop,
6680 gen_rtx_MULT (mode,
6681 XEXP (arg0, 0),
6682 gen_rtx_MULT (mode,
6683 XEXP (arg0, 1),
6685 arg1)),
6686 ext_val, benefit);
6688 /* Propagate the MULT expressions to the innermost nodes. */
6689 else if (GET_CODE (arg0) == PLUS)
6691 /* (invar_0 + invar_1) * invar_2. Distribute. */
6692 return simplify_giv_expr (loop,
6693 gen_rtx_PLUS (mode,
6694 gen_rtx_MULT (mode,
6695 XEXP (arg0, 0),
6697 arg1),
6698 gen_rtx_MULT (mode,
6699 XEXP (arg0, 1),
6701 arg1)),
6702 ext_val, benefit);
6704 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6706 case MULT:
6707 /* (a * invar_1) * invar_2. Associate. */
6708 return simplify_giv_expr (loop,
6709 gen_rtx_MULT (mode,
6710 XEXP (arg0, 0),
6711 gen_rtx_MULT (mode,
6712 XEXP (arg0, 1),
6713 arg1)),
6714 ext_val, benefit);
6716 case PLUS:
6717 /* (a + invar_1) * invar_2. Distribute. */
6718 return simplify_giv_expr (loop,
6719 gen_rtx_PLUS (mode,
6720 gen_rtx_MULT (mode,
6721 XEXP (arg0, 0),
6722 arg1),
6723 gen_rtx_MULT (mode,
6724 XEXP (arg0, 1),
6725 arg1)),
6726 ext_val, benefit);
6728 default:
6729 abort ();
6732 case ASHIFT:
6733 /* Shift by constant is multiply by power of two. */
6734 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6735 return 0;
6737 return
6738 simplify_giv_expr (loop,
6739 gen_rtx_MULT (mode,
6740 XEXP (x, 0),
6741 GEN_INT ((HOST_WIDE_INT) 1
6742 << INTVAL (XEXP (x, 1)))),
6743 ext_val, benefit);
6745 case NEG:
6746 /* "-a" is "a * (-1)" */
6747 return simplify_giv_expr (loop,
6748 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6749 ext_val, benefit);
6751 case NOT:
6752 /* "~a" is "-a - 1". Silly, but easy. */
6753 return simplify_giv_expr (loop,
6754 gen_rtx_MINUS (mode,
6755 gen_rtx_NEG (mode, XEXP (x, 0)),
6756 const1_rtx),
6757 ext_val, benefit);
6759 case USE:
6760 /* Already in proper form for invariant. */
6761 return x;
6763 case SIGN_EXTEND:
6764 case ZERO_EXTEND:
6765 case TRUNCATE:
6766 /* Conditionally recognize extensions of simple IVs. After we've
6767 computed loop traversal counts and verified the range of the
6768 source IV, we'll reevaluate this as a GIV. */
6769 if (*ext_val == NULL_RTX)
6771 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6772 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6774 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6775 return arg0;
6778 goto do_default;
6780 case REG:
6781 /* If this is a new register, we can't deal with it. */
6782 if (REGNO (x) >= max_reg_before_loop)
6783 return 0;
6785 /* Check for biv or giv. */
6786 switch (REG_IV_TYPE (ivs, REGNO (x)))
6788 case BASIC_INDUCT:
6789 return x;
6790 case GENERAL_INDUCT:
6792 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6794 /* Form expression from giv and add benefit. Ensure this giv
6795 can derive another and subtract any needed adjustment if so. */
6797 /* Increasing the benefit here is risky. The only case in which it
6798 is arguably correct is if this is the only use of V. In other
6799 cases, this will artificially inflate the benefit of the current
6800 giv, and lead to suboptimal code. Thus, it is disabled, since
6801 potentially not reducing an only marginally beneficial giv is
6802 less harmful than reducing many givs that are not really
6803 beneficial. */
6805 rtx single_use = regs->array[REGNO (x)].single_usage;
6806 if (single_use && single_use != const0_rtx)
6807 *benefit += v->benefit;
6810 if (v->cant_derive)
6811 return 0;
6813 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6814 v->src_reg, v->mult_val),
6815 v->add_val);
6817 if (v->derive_adjustment)
6818 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6819 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6820 if (*ext_val)
6822 if (!v->ext_dependent)
6823 return arg0;
6825 else
6827 *ext_val = v->ext_dependent;
6828 return arg0;
6830 return 0;
6833 default:
6834 do_default:
6835 /* If it isn't an induction variable, and it is invariant, we
6836 may be able to simplify things further by looking through
6837 the bits we just moved outside the loop. */
6838 if (loop_invariant_p (loop, x) == 1)
6840 struct movable *m;
6841 struct loop_movables *movables = LOOP_MOVABLES (loop);
6843 for (m = movables->head; m; m = m->next)
6844 if (rtx_equal_p (x, m->set_dest))
6846 /* Ok, we found a match. Substitute and simplify. */
6848 /* If we match another movable, we must use that, as
6849 this one is going away. */
6850 if (m->match)
6851 return simplify_giv_expr (loop, m->match->set_dest,
6852 ext_val, benefit);
6854 /* If consec is non-zero, this is a member of a group of
6855 instructions that were moved together. We handle this
6856 case only to the point of seeking to the last insn and
6857 looking for a REG_EQUAL. Fail if we don't find one. */
6858 if (m->consec != 0)
6860 int i = m->consec;
6861 tem = m->insn;
6862 do
6864 tem = NEXT_INSN (tem);
6866 while (--i > 0);
6868 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6869 if (tem)
6870 tem = XEXP (tem, 0);
6872 else
6874 tem = single_set (m->insn);
6875 if (tem)
6876 tem = SET_SRC (tem);
6879 if (tem)
6881 /* What we are most interested in is pointer
6882 arithmetic on invariants -- only take
6883 patterns we may be able to do something with. */
6884 if (GET_CODE (tem) == PLUS
6885 || GET_CODE (tem) == MULT
6886 || GET_CODE (tem) == ASHIFT
6887 || GET_CODE (tem) == CONST_INT
6888 || GET_CODE (tem) == SYMBOL_REF)
6890 tem = simplify_giv_expr (loop, tem, ext_val,
6891 benefit);
6892 if (tem)
6893 return tem;
6895 else if (GET_CODE (tem) == CONST
6896 && GET_CODE (XEXP (tem, 0)) == PLUS
6897 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6898 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6900 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6901 ext_val, benefit);
6902 if (tem)
6903 return tem;
6906 break;
6909 break;
6912 /* Fall through to general case. */
6913 default:
6914 /* If invariant, return as USE (unless CONST_INT).
6915 Otherwise, not giv. */
6916 if (GET_CODE (x) == USE)
6917 x = XEXP (x, 0);
6919 if (loop_invariant_p (loop, x) == 1)
6921 if (GET_CODE (x) == CONST_INT)
6922 return x;
6923 if (GET_CODE (x) == CONST
6924 && GET_CODE (XEXP (x, 0)) == PLUS
6925 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6926 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6927 x = XEXP (x, 0);
6928 return gen_rtx_USE (mode, x);
6930 else
6931 return 0;
6935 /* This routine folds invariants such that there is only ever one
6936 CONST_INT in the summation. It is only used by simplify_giv_expr. */
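/* Illustrative aside, not part of the original source: applied to
   x == (plus (reg 5) (const_int 4)) and c == (const_int 3),
   sge_plus_constant folds the constants into
   (plus (reg 5) (const_int 7)); applied to a bare register it simply
   forms (plus (reg 5) (const_int 3)). */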
6938 static rtx
6939 sge_plus_constant (x, c)
6940 rtx x, c;
6942 if (GET_CODE (x) == CONST_INT)
6943 return GEN_INT (INTVAL (x) + INTVAL (c));
6944 else if (GET_CODE (x) != PLUS)
6945 return gen_rtx_PLUS (GET_MODE (x), x, c);
6946 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6948 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6949 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6951 else if (GET_CODE (XEXP (x, 0)) == PLUS
6952 || GET_CODE (XEXP (x, 1)) != PLUS)
6954 return gen_rtx_PLUS (GET_MODE (x),
6955 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6957 else
6959 return gen_rtx_PLUS (GET_MODE (x),
6960 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6964 static rtx
6965 sge_plus (mode, x, y)
6966 enum machine_mode mode;
6967 rtx x, y;
6969 while (GET_CODE (y) == PLUS)
6971 rtx a = XEXP (y, 0);
6972 if (GET_CODE (a) == CONST_INT)
6973 x = sge_plus_constant (x, a);
6974 else
6975 x = gen_rtx_PLUS (mode, x, a);
6976 y = XEXP (y, 1);
6978 if (GET_CODE (y) == CONST_INT)
6979 x = sge_plus_constant (x, y);
6980 else
6981 x = gen_rtx_PLUS (mode, x, y);
6982 return x;
6985 /* Help detect a giv that is calculated by several consecutive insns;
6986 for example,
6987 giv = biv * M
6988 giv = giv + A
6989 The caller has already identified the first insn P as having a giv as dest;
6990 we check that all other insns that set the same register follow
6991 immediately after P, that they alter nothing else,
6992 and that the result of the last is still a giv.
6994 The value is 0 if the reg set in P is not really a giv.
6995 Otherwise, the value is the amount gained by eliminating
6996 all the consecutive insns that compute the value.
6998 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6999 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
7001 The coefficients of the ultimate giv value are stored in
7002 *MULT_VAL and *ADD_VAL. */
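/* Illustrative aside, not part of the original source: the sequence

       r = b * 4;          <- P, already identified as a giv
       r = r + base;       <- a consecutive set of the same register

   is accepted here, leaving *MULT_VAL == (const_int 4) and
   *ADD_VAL == base, with *LAST_CONSEC_INSN set to the second insn. */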
7004 static int
7005 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
7006 add_val, mult_val, ext_val, last_consec_insn)
7007 const struct loop *loop;
7008 int first_benefit;
7009 rtx p;
7010 rtx src_reg;
7011 rtx dest_reg;
7012 rtx *add_val;
7013 rtx *mult_val;
7014 rtx *ext_val;
7015 rtx *last_consec_insn;
7017 struct loop_ivs *ivs = LOOP_IVS (loop);
7018 struct loop_regs *regs = LOOP_REGS (loop);
7019 int count;
7020 enum rtx_code code;
7021 int benefit;
7022 rtx temp;
7023 rtx set;
7025 /* Indicate that this is a giv so that we can update the value produced in
7026 each insn of the multi-insn sequence.
7028 This induction structure will be used only by the call to
7029 general_induction_var below, so we can allocate it on our stack.
7030 If this is a giv, our caller will replace the induct var entry with
7031 a new induction structure. */
7032 struct induction *v;
7034 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7035 return 0;
7037 v = (struct induction *) alloca (sizeof (struct induction));
7038 v->src_reg = src_reg;
7039 v->mult_val = *mult_val;
7040 v->add_val = *add_val;
7041 v->benefit = first_benefit;
7042 v->cant_derive = 0;
7043 v->derive_adjustment = 0;
7044 v->ext_dependent = NULL_RTX;
7046 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7047 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7049 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7051 while (count > 0)
7053 p = NEXT_INSN (p);
7054 code = GET_CODE (p);
7056 /* If libcall, skip to end of call sequence. */
7057 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7058 p = XEXP (temp, 0);
7060 if (code == INSN
7061 && (set = single_set (p))
7062 && GET_CODE (SET_DEST (set)) == REG
7063 && SET_DEST (set) == dest_reg
7064 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7065 add_val, mult_val, ext_val, 0,
7066 &benefit, VOIDmode)
7067 /* Giv created by equivalent expression. */
7068 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7069 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7070 add_val, mult_val, ext_val, 0,
7071 &benefit, VOIDmode)))
7072 && src_reg == v->src_reg)
7074 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7075 benefit += libcall_benefit (p);
7077 count--;
7078 v->mult_val = *mult_val;
7079 v->add_val = *add_val;
7080 v->benefit += benefit;
7082 else if (code != NOTE)
7084 /* Allow insns that set something other than this giv to a
7085 constant. Such insns are needed on machines which cannot
7086 include long constants and should not disqualify a giv. */
7087 if (code == INSN
7088 && (set = single_set (p))
7089 && SET_DEST (set) != dest_reg
7090 && CONSTANT_P (SET_SRC (set)))
7091 continue;
7093 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7094 return 0;
7098 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7099 *last_consec_insn = p;
7100 return v->benefit;
7103 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7104 represented by G1. If no such expression can be found, or it is clear that
7105 it cannot possibly be a valid address, 0 is returned.
7107 To perform the computation, we note that
7108 G1 = x * v + a and
7109 G2 = y * v + b
7110 where `v' is the biv.
7112 So G2 = (y/x) * G1 + (b - a*y/x).
7114 Note that MULT = y/x.
7116 Update: A and B are now allowed to be additive expressions such that
7117 B contains all variables in A. That is, computing B-A will not require
7118 subtracting variables. */
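/* Illustrative aside, not part of the original source: with
   G1 = 2*v + 1 and G2 = 4*v + 7 we get MULT = y/x = 2 and
   ADD = b - a*y/x = 5, so G2 is rewritten as 2*G1 + 5; substituting
   G1 back in gives 2*(2*v + 1) + 5 == 4*v + 7, as required. */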
7120 static rtx
7121 express_from_1 (a, b, mult)
7122 rtx a, b, mult;
7124 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7126 if (mult == const0_rtx)
7127 return b;
7129 /* If MULT is not 1, we cannot handle A with non-constants, since we
7130 would then be required to subtract multiples of the registers in A.
7131 This is theoretically possible, and may even apply to some Fortran
7132 constructs, but it is a lot of work and we do not attempt it here. */
7134 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7135 return NULL_RTX;
7137 /* In general these structures are sorted top to bottom (down the PLUS
7138 chain), but not left to right across the PLUS. If B is a higher
7139 order giv than A, we can strip one level and recurse. If A is higher
7140 order, we'll eventually bail out, but won't know that until the end.
7141 If they are the same, we'll strip one level around this loop. */
7143 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7145 rtx ra, rb, oa, ob, tmp;
7147 ra = XEXP (a, 0), oa = XEXP (a, 1);
7148 if (GET_CODE (ra) == PLUS)
7149 tmp = ra, ra = oa, oa = tmp;
7151 rb = XEXP (b, 0), ob = XEXP (b, 1);
7152 if (GET_CODE (rb) == PLUS)
7153 tmp = rb, rb = ob, ob = tmp;
7155 if (rtx_equal_p (ra, rb))
7156 /* We matched: remove one reg completely. */
7157 a = oa, b = ob;
7158 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7159 /* An alternate match. */
7160 a = oa, b = rb;
7161 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7162 /* An alternate match. */
7163 a = ra, b = ob;
7164 else
7166 /* Indicates an extra register in B. Strip one level from B and
7167 recurse, hoping B was the higher order expression. */
7168 ob = express_from_1 (a, ob, mult);
7169 if (ob == NULL_RTX)
7170 return NULL_RTX;
7171 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7175 /* Here we are at the last level of A, go through the cases hoping to
7176 get rid of everything but a constant. */
7178 if (GET_CODE (a) == PLUS)
7180 rtx ra, oa;
7182 ra = XEXP (a, 0), oa = XEXP (a, 1);
7183 if (rtx_equal_p (oa, b))
7184 oa = ra;
7185 else if (!rtx_equal_p (ra, b))
7186 return NULL_RTX;
7188 if (GET_CODE (oa) != CONST_INT)
7189 return NULL_RTX;
7191 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7193 else if (GET_CODE (a) == CONST_INT)
7195 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7197 else if (CONSTANT_P (a))
7199 enum machine_mode mode_a = GET_MODE (a);
7200 enum machine_mode mode_b = GET_MODE (b);
7201 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7202 return simplify_gen_binary (MINUS, mode, b, a);
7204 else if (GET_CODE (b) == PLUS)
7206 if (rtx_equal_p (a, XEXP (b, 0)))
7207 return XEXP (b, 1);
7208 else if (rtx_equal_p (a, XEXP (b, 1)))
7209 return XEXP (b, 0);
7210 else
7211 return NULL_RTX;
7213 else if (rtx_equal_p (a, b))
7214 return const0_rtx;
7216 return NULL_RTX;
7219 rtx
7220 express_from (g1, g2)
7221 struct induction *g1, *g2;
7223 rtx mult, add;
7225 /* The value that G1 will be multiplied by must be a constant integer. Also,
7226 the only chance we have of getting a valid address is if y/x (see above
7227 for notation) is also an integer. */
7228 if (GET_CODE (g1->mult_val) == CONST_INT
7229 && GET_CODE (g2->mult_val) == CONST_INT)
7231 if (g1->mult_val == const0_rtx
7232 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7233 return NULL_RTX;
7234 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7236 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7237 mult = const1_rtx;
7238 else
7240 /* ??? Find out whether one is a multiple of the other? */
7241 return NULL_RTX;
7244 add = express_from_1 (g1->add_val, g2->add_val, mult);
7245 if (add == NULL_RTX)
7247 /* Failed. If we've got a multiplication factor between G1 and G2,
7248 scale G1's addend and try again. */
7249 if (INTVAL (mult) > 1)
7251 rtx g1_add_val = g1->add_val;
7252 if (GET_CODE (g1_add_val) == MULT
7253 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7255 HOST_WIDE_INT m;
7256 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7257 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7258 XEXP (g1_add_val, 0), GEN_INT (m));
7260 else
7262 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7263 mult);
7266 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7269 if (add == NULL_RTX)
7270 return NULL_RTX;
7272 /* Form simplified final result. */
7273 if (mult == const0_rtx)
7274 return add;
7275 else if (mult == const1_rtx)
7276 mult = g1->dest_reg;
7277 else
7278 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7280 if (add == const0_rtx)
7281 return mult;
7282 else
7284 if (GET_CODE (add) == PLUS
7285 && CONSTANT_P (XEXP (add, 1)))
7287 rtx tem = XEXP (add, 1);
7288 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7289 add = tem;
7292 return gen_rtx_PLUS (g2->mode, mult, add);
7296 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7297 represented by G1. This indicates that G2 should be combined with G1 and
7298 that G2 can use (either directly or via an address expression) a register
7299 used to represent G1. */
7301 static rtx
7302 combine_givs_p (g1, g2)
7303 struct induction *g1, *g2;
7305 rtx comb, ret;
7307 /* With the introduction of ext dependent givs, we must be careful about
7308 modes. G2 must not use a wider mode than G1. */
7309 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7310 return NULL_RTX;
7312 ret = comb = express_from (g1, g2);
7313 if (comb == NULL_RTX)
7314 return NULL_RTX;
7315 if (g1->mode != g2->mode)
7316 ret = gen_lowpart (g2->mode, comb);
7318 /* If these givs are identical, they can be combined. We use the results
7319 of express_from because the addends are not in a canonical form, so
7320 rtx_equal_p is a weaker test. */
7321 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7322 combination to be the other way round. */
7323 if (comb == g1->dest_reg
7324 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7326 return ret;
7329 /* If G2 can be expressed as a function of G1 and that function is valid
7330 as an address and no more expensive than using a register for G2,
7331 the expression of G2 in terms of G1 can be used. */
7332 if (ret != NULL_RTX
7333 && g2->giv_type == DEST_ADDR
7334 && memory_address_p (GET_MODE (g2->mem), ret)
7335 /* ??? Loses, especially with -fforce-addr, where *g2->location
7336 will always be a register, and so anything more complicated
7337 gets discarded. */
7338 #if 0
7339 #ifdef ADDRESS_COST
7340 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7341 #else
7342 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7343 #endif
7344 #endif
7347 return ret;
7350 return NULL_RTX;
7353 /* Check each extension dependent giv in this class to see if its
7354 root biv is safe from wrapping in the interior mode, which would
7355 make the giv illegal. */
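/* Illustrative aside, not part of the original source: if a QImode biv
   starts at 0, is incremented by 1 and the loop runs 100 times, it stays
   within [0, 100], so both sign and zero extensions of it are safe.
   With 200 iterations the end value 200 still fits the unsigned QImode
   range but not the signed range [-128, 127], so a sign-extension
   dependent giv must be invalidated while a zero-extension dependent
   one remains valid. */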
7357 static void
7358 check_ext_dependent_givs (bl, loop_info)
7359 struct iv_class *bl;
7360 struct loop_info *loop_info;
7362 int ze_ok = 0, se_ok = 0, info_ok = 0;
7363 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7364 HOST_WIDE_INT start_val;
7365 unsigned HOST_WIDE_INT u_end_val = 0;
7366 unsigned HOST_WIDE_INT u_start_val = 0;
7367 rtx incr = pc_rtx;
7368 struct induction *v;
7370 /* Make sure the iteration data is available. We must have
7371 constants in order to be certain of no overflow. */
7372 /* ??? An unknown iteration count with an increment of +-1
7373 combined with friendly exit tests against an invariant
7374 value is also amenable to optimization. Not implemented. */
7375 if (loop_info->n_iterations > 0
7376 && bl->initial_value
7377 && GET_CODE (bl->initial_value) == CONST_INT
7378 && (incr = biv_total_increment (bl))
7379 && GET_CODE (incr) == CONST_INT
7380 /* Make sure the host can represent the arithmetic. */
7381 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7383 unsigned HOST_WIDE_INT abs_incr, total_incr;
7384 HOST_WIDE_INT s_end_val;
7385 int neg_incr;
7387 info_ok = 1;
7388 start_val = INTVAL (bl->initial_value);
7389 u_start_val = start_val;
7391 neg_incr = 0, abs_incr = INTVAL (incr);
7392 if (INTVAL (incr) < 0)
7393 neg_incr = 1, abs_incr = -abs_incr;
7394 total_incr = abs_incr * loop_info->n_iterations;
7396 /* Check for host arithmetic overflow.  */
7397 if (total_incr / loop_info->n_iterations == abs_incr)
7399 unsigned HOST_WIDE_INT u_max;
7400 HOST_WIDE_INT s_max;
7402 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7403 s_end_val = u_end_val;
7404 u_max = GET_MODE_MASK (biv_mode);
7405 s_max = u_max >> 1;
7407 /* Check zero extension of biv ok. */
7408 if (start_val >= 0
7409 /* Check for host arithmetic overflow.  */
7410 && (neg_incr
7411 ? u_end_val < u_start_val
7412 : u_end_val > u_start_val)
7413 /* Check for target arithmetic overflow. */
7414 && (neg_incr
7415 ? 1 /* taken care of with host overflow */
7416 : u_end_val <= u_max))
7418 ze_ok = 1;
7421 /* Check sign extension of biv ok. */
7422 /* ??? While it is true that overflow with signed and pointer
7423 arithmetic is undefined, I fear too many programmers don't
7424 keep this fact in mind -- myself included on occasion.
7425 So leave the signed overflow optimizations alone.  */
7426 if (start_val >= -s_max - 1
7427 /* Check for host arithmetic overflow.  */
7428 && (neg_incr
7429 ? s_end_val < start_val
7430 : s_end_val > start_val)
7431 /* Check for target arithmetic overflow. */
7432 && (neg_incr
7433 ? s_end_val >= -s_max - 1
7434 : s_end_val <= s_max))
7436 se_ok = 1;
7441 /* Invalidate givs that fail the tests. */
7442 for (v = bl->giv; v; v = v->next_iv)
7443 if (v->ext_dependent)
7445 enum rtx_code code = GET_CODE (v->ext_dependent);
7446 int ok = 0;
7448 switch (code)
7450 case SIGN_EXTEND:
7451 ok = se_ok;
7452 break;
7453 case ZERO_EXTEND:
7454 ok = ze_ok;
7455 break;
7457 case TRUNCATE:
7458 /* We don't know whether this value is being used as either
7459 signed or unsigned, so to safely truncate we must satisfy
7460 both. The initial check here verifies the BIV itself;
7461 once that is successful we may check its range wrt the
7462 derived GIV. */
7463 if (se_ok && ze_ok)
7465 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7466 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7468 /* We know from the above that both endpoints are nonnegative,
7469 and that there is no wrapping. Verify that both endpoints
7470 are within the (signed) range of the outer mode. */
7471 if (u_start_val <= max && u_end_val <= max)
7472 ok = 1;
7474 break;
7476 default:
7477 abort ();
7480 if (ok)
7482 if (loop_dump_stream)
7484 fprintf (loop_dump_stream,
7485 "Verified ext dependent giv at %d of reg %d\n",
7486 INSN_UID (v->insn), bl->regno);
7489 else
7491 if (loop_dump_stream)
7493 const char *why;
7495 if (info_ok)
7496 why = "biv iteration values overflowed";
7497 else
7499 if (incr == pc_rtx)
7500 incr = biv_total_increment (bl);
7501 if (incr == const1_rtx)
7502 why = "biv iteration info incomplete; incr by 1";
7503 else
7504 why = "biv iteration info incomplete";
7507 fprintf (loop_dump_stream,
7508 "Failed ext dependent giv at %d, %s\n",
7509 INSN_UID (v->insn), why);
7511 v->ignore = 1;
7512 bl->all_reduced = 0;
7517 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7519 rtx
7520 extend_value_for_giv (v, value)
7521 struct induction *v;
7522 rtx value;
7524 rtx ext_dep = v->ext_dependent;
7526 if (! ext_dep)
7527 return value;
7529 /* Recall that check_ext_dependent_givs verified that the known bounds
7530 of a biv did not overflow or wrap with respect to the extension for
7531 the giv. Therefore, constants need no additional adjustment. */
7532 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7533 return value;
7535 /* Otherwise, we must adjust the value to compensate for the
7536 differing modes of the biv and the giv. */
7537 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7540 struct combine_givs_stats
7542 int giv_number;
7543 int total_benefit;
7546 static int
7547 cmp_combine_givs_stats (xp, yp)
7548 const PTR xp;
7549 const PTR yp;
7551 const struct combine_givs_stats * const x =
7552 (const struct combine_givs_stats *) xp;
7553 const struct combine_givs_stats * const y =
7554 (const struct combine_givs_stats *) yp;
7555 int d;
7556 d = y->total_benefit - x->total_benefit;
7557 /* Stabilize the sort. */
7558 if (!d)
7559 d = x->giv_number - y->giv_number;
7560 return d;
7563 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7564 any other. If so, point SAME to the giv combined with and set NEW_REG to
7565 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7566 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
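/* Bookkeeping sketch: can_combine below is a giv_count x giv_count matrix
   whose [i][j] entry is the rtx expressing giv j in terms of giv i, or 0.
   stats[] scores each giv by its own benefit plus that of every giv it
   could absorb; the givs are then combined greedily in score order.  */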
7568 static void
7569 combine_givs (regs, bl)
7570 struct loop_regs *regs;
7571 struct iv_class *bl;
7573 /* Additional benefit to add for being combined multiple times. */
7574 const int extra_benefit = 3;
7576 struct induction *g1, *g2, **giv_array;
7577 int i, j, k, giv_count;
7578 struct combine_givs_stats *stats;
7579 rtx *can_combine;
7581 /* Count givs, because bl->giv_count is incorrect here. */
7582 giv_count = 0;
7583 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7584 if (!g1->ignore)
7585 giv_count++;
7587 giv_array
7588 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7589 i = 0;
7590 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7591 if (!g1->ignore)
7592 giv_array[i++] = g1;
7594 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7595 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7597 for (i = 0; i < giv_count; i++)
7599 int this_benefit;
7600 rtx single_use;
7602 g1 = giv_array[i];
7603 stats[i].giv_number = i;
7605 /* If a DEST_REG GIV is used only once, do not allow it to combine
7606 with anything, for in doing so we will gain nothing that cannot
7607 be had by simply letting the GIV with which we would have combined
7608 be reduced on its own.  The loss shows up in particular with
7609 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7610 be seen elsewhere as well. */
7611 if (g1->giv_type == DEST_REG
7612 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7613 && single_use != const0_rtx)
7614 continue;
7616 this_benefit = g1->benefit;
7617 /* Add an additional weight for zero addends. */
7618 if (g1->no_const_addval)
7619 this_benefit += 1;
7621 for (j = 0; j < giv_count; j++)
7623 rtx this_combine;
7625 g2 = giv_array[j];
7626 if (g1 != g2
7627 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7629 can_combine[i * giv_count + j] = this_combine;
7630 this_benefit += g2->benefit + extra_benefit;
7633 stats[i].total_benefit = this_benefit;
7636 /* Iterate, combining until we can't. */
7637 restart:
7638 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7640 if (loop_dump_stream)
7642 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7643 for (k = 0; k < giv_count; k++)
7645 g1 = giv_array[stats[k].giv_number];
7646 if (!g1->combined_with && !g1->same)
7647 fprintf (loop_dump_stream, " {%d, %d}",
7648 INSN_UID (giv_array[stats[k].giv_number]->insn),
7649 stats[k].total_benefit);
7651 putc ('\n', loop_dump_stream);
7654 for (k = 0; k < giv_count; k++)
7656 int g1_add_benefit = 0;
7658 i = stats[k].giv_number;
7659 g1 = giv_array[i];
7661 /* If it has already been combined, skip. */
7662 if (g1->combined_with || g1->same)
7663 continue;
7665 for (j = 0; j < giv_count; j++)
7667 g2 = giv_array[j];
7668 if (g1 != g2 && can_combine[i * giv_count + j]
7669 /* If it has already been combined, skip. */
7670 && ! g2->same && ! g2->combined_with)
7672 int l;
7674 g2->new_reg = can_combine[i * giv_count + j];
7675 g2->same = g1;
7676 /* For a DEST_ADDR giv, we may now replace a register by a memory
7677 expression.  This changes the costs considerably, so add the
7678 compensation.  */
7679 if (g2->giv_type == DEST_ADDR)
7680 g2->benefit = (g2->benefit + reg_address_cost
7681 - address_cost (g2->new_reg,
7682 GET_MODE (g2->mem)));
7683 g1->combined_with++;
7684 g1->lifetime += g2->lifetime;
7686 g1_add_benefit += g2->benefit;
7688 /* ??? The new final_[bg]iv_value code does a much better job
7689 of finding replaceable giv's, and hence this code may no
7690 longer be necessary. */
7691 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7692 g1_add_benefit -= copy_cost;
7694 /* To help optimize the next set of combinations, remove
7695 this giv from the benefits of other potential mates. */
7696 for (l = 0; l < giv_count; ++l)
7698 int m = stats[l].giv_number;
7699 if (can_combine[m * giv_count + j])
7700 stats[l].total_benefit -= g2->benefit + extra_benefit;
7703 if (loop_dump_stream)
7704 fprintf (loop_dump_stream,
7705 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7706 INSN_UID (g2->insn), INSN_UID (g1->insn),
7707 g1->benefit, g1_add_benefit, g1->lifetime);
7711 /* To help optimize the next set of combinations, remove
7712 this giv from the benefits of other potential mates. */
7713 if (g1->combined_with)
7715 for (j = 0; j < giv_count; ++j)
7717 int m = stats[j].giv_number;
7718 if (can_combine[m * giv_count + i])
7719 stats[j].total_benefit -= g1->benefit + extra_benefit;
7722 g1->benefit += g1_add_benefit;
7724 /* We've finished with this giv, and everything it touched.
7725 Restart the combination so that proper weights for the
7726 rest of the givs are properly taken into account. */
7727 /* ??? Ideally we would compact the arrays at this point, so
7728 as to not cover old ground. But sanely compacting
7729 can_combine is tricky. */
7730 goto restart;
7734 /* Clean up. */
7735 free (stats);
7736 free (can_combine);
7739 /* Generate sequence for REG = B * M + A. */
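/* For example (a sketch): gen_add_mult (b, GEN_INT (4), GEN_INT (8), reg)
   returns a sequence computing reg = b * 4 + 8; expand_mult_add will
   normally open-code this as a shift and an add rather than a multiply.  */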
7741 static rtx
7742 gen_add_mult (b, m, a, reg)
7743 rtx b; /* initial value of basic induction variable */
7744 rtx m; /* multiplicative constant */
7745 rtx a; /* additive constant */
7746 rtx reg; /* destination register */
7748 rtx seq;
7749 rtx result;
7751 start_sequence ();
7752 /* Use unsigned arithmetic. */
7753 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7754 if (reg != result)
7755 emit_move_insn (reg, result);
7756 seq = get_insns ();
7757 end_sequence ();
7759 return seq;
7763 /* Update registers created in insn sequence SEQ. */
7765 static void
7766 loop_regs_update (loop, seq)
7767 const struct loop *loop ATTRIBUTE_UNUSED;
7768 rtx seq;
7770 rtx insn;
7772 /* Update register info for alias analysis. */
7774 if (seq == NULL_RTX)
7775 return;
7777 if (INSN_P (seq))
7779 insn = seq;
7780 while (insn != NULL_RTX)
7782 rtx set = single_set (insn);
7784 if (set && GET_CODE (SET_DEST (set)) == REG)
7785 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7787 insn = NEXT_INSN (insn);
7790 else if (GET_CODE (seq) == SET
7791 && GET_CODE (SET_DEST (seq)) == REG)
7792 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7796 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  */
7798 void
7799 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7800 const struct loop *loop;
7801 rtx b; /* initial value of basic induction variable */
7802 rtx m; /* multiplicative constant */
7803 rtx a; /* additive constant */
7804 rtx reg; /* destination register */
7805 basic_block before_bb;
7806 rtx before_insn;
7808 rtx seq;
7810 if (! before_insn)
7812 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7813 return;
7816 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7817 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7819 /* Increase the lifetime of any invariants moved further in code. */
7820 update_reg_last_use (a, before_insn);
7821 update_reg_last_use (b, before_insn);
7822 update_reg_last_use (m, before_insn);
7824 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7826 /* It is possible that the expansion created lots of new registers.
7827 Iterate over the sequence we just created and record them all. */
7828 loop_regs_update (loop, seq);
7832 /* Emit insns after the loop to set REG = B * M + A.  */
7834 void
7835 loop_iv_add_mult_sink (loop, b, m, a, reg)
7836 const struct loop *loop;
7837 rtx b; /* initial value of basic induction variable */
7838 rtx m; /* multiplicative constant */
7839 rtx a; /* additive constant */
7840 rtx reg; /* destination register */
7842 rtx seq;
7844 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7845 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7847 /* Increase the lifetime of any invariants moved further in code.
7848 ??? Is this really necessary? */
7849 update_reg_last_use (a, loop->sink);
7850 update_reg_last_use (b, loop->sink);
7851 update_reg_last_use (m, loop->sink);
7853 loop_insn_sink (loop, seq);
7855 /* It is possible that the expansion created lots of new registers.
7856 Iterate over the sequence we just created and record them all. */
7857 loop_regs_update (loop, seq);
7861 /* Emit insns in loop pre-header to set REG = B * M + A.  */
7863 void
7864 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7865 const struct loop *loop;
7866 rtx b; /* initial value of basic induction variable */
7867 rtx m; /* multiplicative constant */
7868 rtx a; /* additive constant */
7869 rtx reg; /* destination register */
7871 rtx seq;
7873 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7874 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7876 loop_insn_hoist (loop, seq);
7878 /* It is possible that the expansion created lots of new registers.
7879 Iterate over the sequence we just created and record them all. */
7880 loop_regs_update (loop, seq);
7885 /* Similar to gen_add_mult, but compute cost rather than generating
7886 sequence. */
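/* The cost returned is the sum of rtx_cost over the SET_SRCs of the
   expanded insns, so a shift-and-add expansion scores lower than one
   requiring a full multiply or a libcall.  */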
7888 static int
7889 iv_add_mult_cost (b, m, a, reg)
7890 rtx b; /* initial value of basic induction variable */
7891 rtx m; /* multiplicative constant */
7892 rtx a; /* additive constant */
7893 rtx reg; /* destination register */
7895 int cost = 0;
7896 rtx last, result;
7898 start_sequence ();
7899 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7900 if (reg != result)
7901 emit_move_insn (reg, result);
7902 last = get_last_insn ();
7903 while (last)
7905 rtx t = single_set (last);
7906 if (t)
7907 cost += rtx_cost (SET_SRC (t), SET);
7908 last = PREV_INSN (last);
7910 end_sequence ();
7911 return cost;
7914 /* Test whether A * B can be computed without
7915 an actual multiply insn. Value is 1 if so.
7917 ??? This function stinks because it generates a ton of wasted RTL
7918 ??? and as a result fragments GC memory to no end. There are other
7919 ??? places in the compiler which are invoked a lot and do the same
7920 ??? thing, generate wasted RTL just to see if something is possible. */
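/* For instance (a sketch): a multiply by 5 usually expands to
   x + (x << 2), two insns with no MULT, and so is considered cheap;
   a constant with no such decomposition leaves a mult insn or a long
   sequence and is rejected by the tests below.  */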
7922 static int
7923 product_cheap_p (a, b)
7924 rtx a;
7925 rtx b;
7927 rtx tmp;
7928 int win, n_insns;
7930 /* If only one is constant, make it B. */
7931 if (GET_CODE (a) == CONST_INT)
7932 tmp = a, a = b, b = tmp;
7934 /* If A is still constant, both operands were, so no multiply is needed.  */
7935 if (GET_CODE (a) == CONST_INT)
7936 return 1;
7938 /* If B is not constant, neither operand is, so a multiply would be needed.  */
7939 if (GET_CODE (b) != CONST_INT)
7940 return 0;
7942 /* One operand is constant, so we might not need a multiply insn.  Generate
7943 the code for the multiply and see whether a call, a multiply, or a long
7944 sequence of insns is generated.  */
7946 start_sequence ();
7947 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7948 tmp = get_insns ();
7949 end_sequence ();
7951 win = 1;
7952 if (INSN_P (tmp))
7954 n_insns = 0;
7955 while (tmp != NULL_RTX)
7957 rtx next = NEXT_INSN (tmp);
7959 if (++n_insns > 3
7960 || GET_CODE (tmp) != INSN
7961 || (GET_CODE (PATTERN (tmp)) == SET
7962 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7963 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7964 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7965 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7967 win = 0;
7968 break;
7971 tmp = next;
7974 else if (GET_CODE (tmp) == SET
7975 && GET_CODE (SET_SRC (tmp)) == MULT)
7976 win = 0;
7977 else if (GET_CODE (tmp) == PARALLEL
7978 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7979 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7980 win = 0;
7982 return win;
7985 /* Check to see if loop can be terminated by a "decrement and branch until
7986 zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
7987 Also try reversing an increment loop to a decrement loop
7988 to see if the optimization can be performed.
7989 Value is nonzero if optimization was performed. */
7991 /* This is useful even if the architecture doesn't have such an insn,
7992 because it might change a loop which increments from 0 to n to a loop
7993 which decrements from n to 0. A loop that decrements to zero is usually
7994 faster than one that increments from zero. */
7996 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7997 such as approx_final_value, biv_total_increment, loop_iterations, and
7998 final_[bg]iv_value. */
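/* Reversal sketch, in hypothetical C terms:
     for (i = 0; i < n; i++) body;   becomes   for (i = n; --i >= 0; ) body;
   The reversed loop exits on a compare against zero, which a
   decrement-and-branch-until-zero insn can implement in one step.  */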
8000 static int
8001 check_dbra_loop (loop, insn_count)
8002 struct loop *loop;
8003 int insn_count;
8005 struct loop_info *loop_info = LOOP_INFO (loop);
8006 struct loop_regs *regs = LOOP_REGS (loop);
8007 struct loop_ivs *ivs = LOOP_IVS (loop);
8008 struct iv_class *bl;
8009 rtx reg;
8010 rtx jump_label;
8011 rtx final_value;
8012 rtx start_value;
8013 rtx new_add_val;
8014 rtx comparison;
8015 rtx before_comparison;
8016 rtx p;
8017 rtx jump;
8018 rtx first_compare;
8019 int compare_and_branch;
8020 rtx loop_start = loop->start;
8021 rtx loop_end = loop->end;
8023 /* If last insn is a conditional branch, and the insn before tests a
8024 register value, try to optimize it. Otherwise, we can't do anything. */
8026 jump = PREV_INSN (loop_end);
8027 comparison = get_condition_for_loop (loop, jump);
8028 if (comparison == 0)
8029 return 0;
8030 if (!onlyjump_p (jump))
8031 return 0;
8033 /* Try to compute whether the compare/branch at the loop end is one or
8034 two instructions. */
8035 get_condition (jump, &first_compare);
8036 if (first_compare == jump)
8037 compare_and_branch = 1;
8038 else if (first_compare == prev_nonnote_insn (jump))
8039 compare_and_branch = 2;
8040 else
8041 return 0;
8044 /* If more than one condition is present to control the loop, then
8045 do not proceed, as this function does not know how to rewrite
8046 loop tests with more than one condition.
8048 Look backwards from the first insn in the last comparison
8049 sequence and see if we've got another comparison sequence. */
8051 rtx jump1;
8052 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8053 if (GET_CODE (jump1) == JUMP_INSN)
8054 return 0;
8057 /* Check all of the bivs to see if the compare uses one of them.
8058 Skip biv's set more than once because we can't guarantee that
8059 it will be zero on the last iteration. Also skip if the biv is
8060 used between its update and the test insn. */
8062 for (bl = ivs->list; bl; bl = bl->next)
8064 if (bl->biv_count == 1
8065 && ! bl->biv->maybe_multiple
8066 && bl->biv->dest_reg == XEXP (comparison, 0)
8067 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8068 first_compare))
8069 break;
8072 if (! bl)
8073 return 0;
8075 /* Look for the case where the basic induction variable is always
8076 nonnegative, and equals zero on the last iteration.
8077 In this case, add a reg_note REG_NONNEG, which allows the
8078 m68k DBRA instruction to be used. */
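/* For example: a biv with add_val -1, initial value 16, and exit test
   (NE biv 0) runs from 16 down to 0 and never goes negative, so the
   REG_NONNEG note is justified.  */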
8080 if (((GET_CODE (comparison) == GT
8081 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8082 && INTVAL (XEXP (comparison, 1)) == -1)
8083 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8084 && GET_CODE (bl->biv->add_val) == CONST_INT
8085 && INTVAL (bl->biv->add_val) < 0)
8087 /* Initial value must be greater than 0, and
8088 init_val % -dec_value == 0 must hold to ensure that the biv equals
8089 zero on the last iteration.  */
8091 if (GET_CODE (bl->initial_value) == CONST_INT
8092 && INTVAL (bl->initial_value) > 0
8093 && (INTVAL (bl->initial_value)
8094 % (-INTVAL (bl->biv->add_val))) == 0)
8096 /* register always nonnegative, add REG_NOTE to branch */
8097 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8098 REG_NOTES (jump)
8099 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8100 REG_NOTES (jump));
8101 bl->nonneg = 1;
8103 return 1;
8106 /* If the decrement is 1 and the value was tested as >= 0 before
8107 the loop, then we can safely optimize. */
8108 for (p = loop_start; p; p = PREV_INSN (p))
8110 if (GET_CODE (p) == CODE_LABEL)
8111 break;
8112 if (GET_CODE (p) != JUMP_INSN)
8113 continue;
8115 before_comparison = get_condition_for_loop (loop, p);
8116 if (before_comparison
8117 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8118 && GET_CODE (before_comparison) == LT
8119 && XEXP (before_comparison, 1) == const0_rtx
8120 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8121 && INTVAL (bl->biv->add_val) == -1)
8123 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8124 REG_NOTES (jump)
8125 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8126 REG_NOTES (jump));
8127 bl->nonneg = 1;
8129 return 1;
8133 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8134 && INTVAL (bl->biv->add_val) > 0)
8136 /* Try to change inc to dec, so we can apply the above optimization.  */
8137 /* Can do this if:
8138 all registers modified are induction variables or invariant,
8139 all memory references have non-overlapping addresses
8140 (obviously true if only one write)
8141 allow 2 insns for the compare/jump at the end of the loop. */
8142 /* Also, we must avoid any instructions which use both the reversed
8143 biv and another biv. Such instructions will fail if the loop is
8144 reversed. We meet this condition by requiring that either
8145 no_use_except_counting is true, or else that there is only
8146 one biv. */
8147 int num_nonfixed_reads = 0;
8148 /* 1 if the iteration var is used only to count iterations. */
8149 int no_use_except_counting = 0;
8150 /* 1 if the loop has no memory store, or it has a single memory store
8151 which is reversible. */
8152 int reversible_mem_store = 1;
8154 if (bl->giv_count == 0
8155 && !loop->exit_count
8156 && !loop_info->has_multiple_exit_targets)
8158 rtx bivreg = regno_reg_rtx[bl->regno];
8159 struct iv_class *blt;
8161 /* If there are no givs for this biv, and the only exit is the
8162 fall through at the end of the loop, then
8163 see if perhaps there are no uses except to count. */
8164 no_use_except_counting = 1;
8165 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8166 if (INSN_P (p))
8168 rtx set = single_set (p);
8170 if (set && GET_CODE (SET_DEST (set)) == REG
8171 && REGNO (SET_DEST (set)) == bl->regno)
8172 /* An insn that sets the biv is okay. */
8174 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8175 || p == prev_nonnote_insn (loop_end))
8176 && reg_mentioned_p (bivreg, PATTERN (p)))
8178 /* If either of these insns uses the biv and sets a pseudo
8179 that has more than one usage, then the biv has uses
8180 other than counting since it's used to derive a value
8181 that is used more than one time. */
8182 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8183 regs);
8184 if (regs->multiple_uses)
8186 no_use_except_counting = 0;
8187 break;
8190 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8192 no_use_except_counting = 0;
8193 break;
8197 /* A biv has uses besides counting if it is used to set
8198 another biv. */
8199 for (blt = ivs->list; blt; blt = blt->next)
8200 if (blt->init_set
8201 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8203 no_use_except_counting = 0;
8204 break;
8208 if (no_use_except_counting)
8209 /* No need to worry about MEMs. */
8211 else if (loop_info->num_mem_sets <= 1)
8213 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8214 if (INSN_P (p))
8215 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8217 /* If the loop has a single store, and the destination address is
8218 invariant, then we can't reverse the loop, because this address
8219 might then have the wrong value at loop exit.
8220 This would work if the source was invariant also, however, in that
8221 case, the insn should have been moved out of the loop. */
8223 if (loop_info->num_mem_sets == 1)
8225 struct induction *v;
8227 /* If we could prove that each of the memory locations
8228 written to was different, then we could reverse the
8229 store -- but we don't presently have any way of
8230 knowing that. */
8231 reversible_mem_store = 0;
8233 /* If the store depends on a register that is set after the
8234 store, it depends on the initial value, and is thus not
8235 reversible. */
8236 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8238 if (v->giv_type == DEST_REG
8239 && reg_mentioned_p (v->dest_reg,
8240 PATTERN (loop_info->first_loop_store_insn))
8241 && loop_insn_first_p (loop_info->first_loop_store_insn,
8242 v->insn))
8243 reversible_mem_store = 0;
8247 else
8248 return 0;
8250 /* This code only acts for innermost loops. Also it simplifies
8251 the memory address check by only reversing loops with
8252 zero or one memory access.
8253 Two memory accesses could involve parts of the same array,
8254 and that can't be reversed.
8255 If the biv is used only for counting, then we don't need to worry
8256 about all these things. */
8258 if ((num_nonfixed_reads <= 1
8259 && ! loop_info->has_nonconst_call
8260 && ! loop_info->has_prefetch
8261 && ! loop_info->has_volatile
8262 && reversible_mem_store
8263 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8264 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8265 && (bl == ivs->list && bl->next == 0))
8266 || (no_use_except_counting && ! loop_info->has_prefetch))
8268 rtx tem;
8270 /* Loop can be reversed. */
8271 if (loop_dump_stream)
8272 fprintf (loop_dump_stream, "Can reverse loop\n");
8274 /* Now check other conditions:
8276 The increment must be a constant, as must the initial value,
8277 and the comparison code must be LT.
8279 This test can probably be improved since +/- 1 in the constant
8280 can be obtained by changing LT to LE and vice versa; this is
8281 confusing. */
8283 if (comparison
8284 /* for constants, LE gets turned into LT */
8285 && (GET_CODE (comparison) == LT
8286 || (GET_CODE (comparison) == LE
8287 && no_use_except_counting)))
8289 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8290 rtx initial_value, comparison_value;
8291 int nonneg = 0;
8292 enum rtx_code cmp_code;
8293 int comparison_const_width;
8294 unsigned HOST_WIDE_INT comparison_sign_mask;
8296 add_val = INTVAL (bl->biv->add_val);
8297 comparison_value = XEXP (comparison, 1);
8298 if (GET_MODE (comparison_value) == VOIDmode)
8299 comparison_const_width
8300 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8301 else
8302 comparison_const_width
8303 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8304 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8305 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8306 comparison_sign_mask
8307 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8309 /* If the comparison value is not a loop invariant, then we
8310 cannot reverse this loop.
8312 ??? If the insns which initialize the comparison value as
8313 a whole compute an invariant result, then we could move
8314 them out of the loop and proceed with loop reversal. */
8315 if (! loop_invariant_p (loop, comparison_value))
8316 return 0;
8318 if (GET_CODE (comparison_value) == CONST_INT)
8319 comparison_val = INTVAL (comparison_value);
8320 initial_value = bl->initial_value;
8322 /* Normalize the initial value if it is an integer and
8323 has no other use except as a counter. This will allow
8324 a few more loops to be reversed. */
8325 if (no_use_except_counting
8326 && GET_CODE (comparison_value) == CONST_INT
8327 && GET_CODE (initial_value) == CONST_INT)
8329 comparison_val = comparison_val - INTVAL (bl->initial_value);
8330 /* The code below requires comparison_val to be a multiple
8331 of add_val in order to do the loop reversal, so
8332 round up comparison_val to a multiple of add_val.
8333 Since comparison_value is constant, we know that the
8334 current comparison code is LT. */
8335 comparison_val = comparison_val + add_val - 1;
8336 comparison_val
8337 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8338 /* We postpone overflow checks for COMPARISON_VAL here;
8339 even if there is an overflow, we might still be able to
8340 reverse the loop, if converting the loop exit test to
8341 NE is possible. */
8342 initial_value = const0_rtx;
8345 /* First check if we can do a vanilla loop reversal. */
8346 if (initial_value == const0_rtx
8347 /* If we have a decrement_and_branch_on_count,
8348 prefer the NE test, since this will allow that
8349 instruction to be generated. Note that we must
8350 use a vanilla loop reversal if the biv is used to
8351 calculate a giv or has a non-counting use. */
8352 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8353 && defined (HAVE_decrement_and_branch_on_count)
8354 && (! (add_val == 1 && loop->vtop
8355 && (bl->biv_count == 0
8356 || no_use_except_counting)))
8357 #endif
8358 && GET_CODE (comparison_value) == CONST_INT
8359 /* Now do postponed overflow checks on COMPARISON_VAL. */
8360 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8361 & comparison_sign_mask))
8363 /* Register will always be nonnegative, with value
8364 0 on last iteration */
8365 add_adjust = add_val;
8366 nonneg = 1;
8367 cmp_code = GE;
8369 else if (add_val == 1 && loop->vtop
8370 && (bl->biv_count == 0
8371 || no_use_except_counting))
8373 add_adjust = 0;
8374 cmp_code = NE;
8376 else
8377 return 0;
8379 if (GET_CODE (comparison) == LE)
8380 add_adjust -= add_val;
8382 /* If the initial value is not zero, or if the comparison
8383 value is not an exact multiple of the increment, then we
8384 cannot reverse this loop.  */
8385 if (initial_value == const0_rtx
8386 && GET_CODE (comparison_value) == CONST_INT)
8388 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8389 return 0;
8391 else
8393 if (! no_use_except_counting || add_val != 1)
8394 return 0;
8397 final_value = comparison_value;
8399 /* Reset these in case we normalized the initial value
8400 and comparison value above. */
8401 if (GET_CODE (comparison_value) == CONST_INT
8402 && GET_CODE (initial_value) == CONST_INT)
8404 comparison_value = GEN_INT (comparison_val);
8405 final_value
8406 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8408 bl->initial_value = initial_value;
8410 /* Save some info needed to produce the new insns. */
8411 reg = bl->biv->dest_reg;
8412 jump_label = condjump_label (PREV_INSN (loop_end));
8413 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8415 /* Set start_value; if this is not a CONST_INT, we need
8416 to generate a SUB.
8417 Initialize biv to start_value before loop start.
8418 The old initializing insn will be deleted as a
8419 dead store by flow.c. */
8420 if (initial_value == const0_rtx
8421 && GET_CODE (comparison_value) == CONST_INT)
8423 start_value = GEN_INT (comparison_val - add_adjust);
8424 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8426 else if (GET_CODE (initial_value) == CONST_INT)
8428 enum machine_mode mode = GET_MODE (reg);
8429 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8430 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8432 if (add_insn == 0)
8433 return 0;
8435 start_value
8436 = gen_rtx_PLUS (mode, comparison_value, offset);
8437 loop_insn_hoist (loop, add_insn);
8438 if (GET_CODE (comparison) == LE)
8439 final_value = gen_rtx_PLUS (mode, comparison_value,
8440 GEN_INT (add_val));
8442 else if (! add_adjust)
8444 enum machine_mode mode = GET_MODE (reg);
8445 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8446 initial_value);
8448 if (sub_insn == 0)
8449 return 0;
8450 start_value
8451 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8452 loop_insn_hoist (loop, sub_insn);
8454 else
8455 /* We could handle the other cases too, but it'll be
8456 better to have a testcase first. */
8457 return 0;
8459 /* We may not have a single insn which can increment a reg, so
8460 create a sequence to hold all the insns from expand_inc. */
8461 start_sequence ();
8462 expand_inc (reg, new_add_val);
8463 tem = get_insns ();
8464 end_sequence ();
8466 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8467 delete_insn (bl->biv->insn);
8469 /* Update biv info to reflect its new status. */
8470 bl->biv->insn = p;
8471 bl->initial_value = start_value;
8472 bl->biv->add_val = new_add_val;
8474 /* Update loop info. */
8475 loop_info->initial_value = reg;
8476 loop_info->initial_equiv_value = reg;
8477 loop_info->final_value = const0_rtx;
8478 loop_info->final_equiv_value = const0_rtx;
8479 loop_info->comparison_value = const0_rtx;
8480 loop_info->comparison_code = cmp_code;
8481 loop_info->increment = new_add_val;
8483 /* Inc LABEL_NUSES so that delete_insn will
8484 not delete the label. */
8485 LABEL_NUSES (XEXP (jump_label, 0))++;
8487 /* Emit an insn after the end of the loop to set the biv's
8488 proper exit value if it is used anywhere outside the loop. */
8489 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8490 || ! bl->init_insn
8491 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8492 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8494 /* Delete compare/branch at end of loop. */
8495 delete_related_insns (PREV_INSN (loop_end));
8496 if (compare_and_branch == 2)
8497 delete_related_insns (first_compare);
8499 /* Add new compare/branch insn at end of loop. */
8500 start_sequence ();
8501 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8502 GET_MODE (reg), 0,
8503 XEXP (jump_label, 0));
8504 tem = get_insns ();
8505 end_sequence ();
8506 emit_jump_insn_before (tem, loop_end);
8508 for (tem = PREV_INSN (loop_end);
8509 tem && GET_CODE (tem) != JUMP_INSN;
8510 tem = PREV_INSN (tem))
8513 if (tem)
8514 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8516 if (nonneg)
8518 if (tem)
8520 /* Increment of LABEL_NUSES done above. */
8521 /* Register is now always nonnegative,
8522 so add REG_NONNEG note to the branch. */
8523 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8524 REG_NOTES (tem));
8526 bl->nonneg = 1;
8529 /* No insn may reference both the reversed and another biv or it
8530 will fail (see comment near the top of the loop reversal
8531 code).
8532 Earlier on, we have verified that the biv has no use except
8533 counting, or it is the only biv in this function.
8534 However, the code that computes no_use_except_counting does
8535 not verify reg notes. It's possible to have an insn that
8536 references another biv, and has a REG_EQUAL note with an
8537 expression based on the reversed biv. To avoid this case,
8538 remove all REG_EQUAL notes based on the reversed biv
8539 here. */
8540 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8541 if (INSN_P (p))
8543 rtx *pnote;
8544 rtx set = single_set (p);
8545 /* If this is a set of a GIV based on the reversed biv, any
8546 REG_EQUAL notes should still be correct. */
8547 if (! set
8548 || GET_CODE (SET_DEST (set)) != REG
8549 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8550 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8551 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8552 for (pnote = &REG_NOTES (p); *pnote;)
8554 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8555 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8556 XEXP (*pnote, 0)))
8557 *pnote = XEXP (*pnote, 1);
8558 else
8559 pnote = &XEXP (*pnote, 1);
8563 /* Mark that this biv has been reversed. Each giv which depends
8564 on this biv, and which is also live past the end of the loop
8565 will have to be fixed up. */
8567 bl->reversed = 1;
8569 if (loop_dump_stream)
8571 fprintf (loop_dump_stream, "Reversed loop");
8572 if (bl->nonneg)
8573 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8574 else
8575 fprintf (loop_dump_stream, "\n");
8578 return 1;
8583 return 0;
8586 /* Verify whether the biv BL appears to be eliminable,
8587 based on the insns in the loop that refer to it.
8589 If ELIMINATE_P is non-zero, actually do the elimination.
8591 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8592 determine whether invariant insns should be placed inside or at the
8593 start of the loop. */
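/* Elimination sketch (hypothetical): if the only remaining use of biv I
   is an exit test I < N, and the giv P = BASE + 4*I has been reduced,
   the test can be rewritten as P < BASE + 4*N and I deleted; the code
   below checks every use of the biv to see whether such a rewrite is
   possible.  */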
8595 static int
8596 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8597 const struct loop *loop;
8598 struct iv_class *bl;
8599 int eliminate_p;
8600 int threshold, insn_count;
8602 struct loop_ivs *ivs = LOOP_IVS (loop);
8603 rtx reg = bl->biv->dest_reg;
8604 rtx p;
8606 /* Scan all insns in the loop, stopping if we find one that uses the
8607 biv in a way that we cannot eliminate. */
8609 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8611 enum rtx_code code = GET_CODE (p);
8612 basic_block where_bb = 0;
8613 rtx where_insn = threshold >= insn_count ? 0 : p;
8615 /* If this is a libcall that sets a giv, skip ahead to its end. */
8616 if (GET_RTX_CLASS (code) == 'i')
8618 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8620 if (note)
8622 rtx last = XEXP (note, 0);
8623 rtx set = single_set (last);
8625 if (set && GET_CODE (SET_DEST (set)) == REG)
8627 unsigned int regno = REGNO (SET_DEST (set));
8629 if (regno < ivs->n_regs
8630 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8631 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8632 p = last;
8636 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8637 && reg_mentioned_p (reg, PATTERN (p))
8638 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8639 eliminate_p, where_bb, where_insn))
8641 if (loop_dump_stream)
8642 fprintf (loop_dump_stream,
8643 "Cannot eliminate biv %d: biv used in insn %d.\n",
8644 bl->regno, INSN_UID (p));
8645 break;
8649 if (p == loop->end)
8651 if (loop_dump_stream)
8652 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8653 bl->regno, eliminate_p ? "was" : "can be");
8654 return 1;
8657 return 0;
8660 /* INSN and REFERENCE are instructions in the same insn chain.
8661 Return non-zero if INSN is first. */
8663 int
8664 loop_insn_first_p (insn, reference)
8665 rtx insn, reference;
8667 rtx p, q;
8669 for (p = insn, q = reference;;)
8671 /* Start with test for not first so that INSN == REFERENCE yields not
8672 first. */
8673 if (q == insn || ! p)
8674 return 0;
8675 if (p == reference || ! q)
8676 return 1;
8678 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8679 previous insn, hence the <= comparison below does not work if
8680 P is a note. */
8681 if (INSN_UID (p) < max_uid_for_loop
8682 && INSN_UID (q) < max_uid_for_loop
8683 && GET_CODE (p) != NOTE)
8684 return INSN_LUID (p) <= INSN_LUID (q);
8686 if (INSN_UID (p) >= max_uid_for_loop
8687 || GET_CODE (p) == NOTE)
8688 p = NEXT_INSN (p);
8689 if (INSN_UID (q) >= max_uid_for_loop)
8690 q = NEXT_INSN (q);
8694 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8695 the offset that we have to take into account due to auto-increment /
8696 giv derivation is zero.  */
8697 static int
8698 biv_elimination_giv_has_0_offset (biv, giv, insn)
8699 struct induction *biv, *giv;
8700 rtx insn;
8702 /* If the giv V had the auto-inc address optimization applied
8703 to it, and INSN occurs between the giv insn and the biv
8704 insn, then we'd have to adjust the value used here.
8705 This is rare, so we don't bother to make this possible. */
8706 if (giv->auto_inc_opt
8707 && ((loop_insn_first_p (giv->insn, insn)
8708 && loop_insn_first_p (insn, biv->insn))
8709 || (loop_insn_first_p (biv->insn, insn)
8710 && loop_insn_first_p (insn, giv->insn))))
8711 return 0;
8713 return 1;
8716 /* If BL appears in X (part of the pattern of INSN), see if we can
8717 eliminate its use. If so, return 1. If not, return 0.
8719 If BIV does not appear in X, return 1.
8721 If ELIMINATE_P is non-zero, actually do the elimination.
8722 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8723 Depending on how many items have been moved out of the loop, it
8724 will either be before INSN (when WHERE_INSN is non-zero) or at the
8725 start of the loop (when WHERE_INSN is zero). */
8727 static int
8728 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8729 const struct loop *loop;
8730 rtx x, insn;
8731 struct iv_class *bl;
8732 int eliminate_p;
8733 basic_block where_bb;
8734 rtx where_insn;
8736 enum rtx_code code = GET_CODE (x);
8737 rtx reg = bl->biv->dest_reg;
8738 enum machine_mode mode = GET_MODE (reg);
8739 struct induction *v;
8740 rtx arg, tem;
8741 #ifdef HAVE_cc0
8742 rtx new;
8743 #endif
8744 int arg_operand;
8745 const char *fmt;
8746 int i, j;
8748 switch (code)
8750 case REG:
8751 /* If we haven't already been able to do something with this BIV,
8752 we can't eliminate it. */
8753 if (x == reg)
8754 return 0;
8755 return 1;
8757 case SET:
8758 /* If this sets the BIV, it is not a problem. */
8759 if (SET_DEST (x) == reg)
8760 return 1;
8762 /* If this is an insn that defines a giv, it is also ok because
8763 it will go away when the giv is reduced. */
8764 for (v = bl->giv; v; v = v->next_iv)
8765 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8766 return 1;
8768 #ifdef HAVE_cc0
8769 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8771 /* Can replace with any giv that was reduced and
8772 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8773 Require a constant for MULT_VAL, so we know it's nonzero.
8774 ??? We disable this optimization to avoid potential
8775 overflows. */
8777 for (v = bl->giv; v; v = v->next_iv)
8778 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8779 && v->add_val == const0_rtx
8780 && ! v->ignore && ! v->maybe_dead && v->always_computable
8781 && v->mode == mode
8782 && 0)
8784 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8785 continue;
8787 if (! eliminate_p)
8788 return 1;
8790 /* If the giv has the opposite direction of change,
8791 then reverse the comparison. */
8792 if (INTVAL (v->mult_val) < 0)
8793 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8794 const0_rtx, v->new_reg);
8795 else
8796 new = v->new_reg;
8798 /* We can probably test that giv's reduced reg. */
8799 if (validate_change (insn, &SET_SRC (x), new, 0))
8800 return 1;
8803 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8804 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8805 Require a constant for MULT_VAL, so we know it's nonzero.
8806 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8807 overflow problem. */
8809 for (v = bl->giv; v; v = v->next_iv)
8810 if (GET_CODE (v->mult_val) == CONST_INT
8811 && v->mult_val != const0_rtx
8812 && ! v->ignore && ! v->maybe_dead && v->always_computable
8813 && v->mode == mode
8814 && (GET_CODE (v->add_val) == SYMBOL_REF
8815 || GET_CODE (v->add_val) == LABEL_REF
8816 || GET_CODE (v->add_val) == CONST
8817 || (GET_CODE (v->add_val) == REG
8818 && REG_POINTER (v->add_val))))
8820 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8821 continue;
8823 if (! eliminate_p)
8824 return 1;
8826 /* If the giv has the opposite direction of change,
8827 then reverse the comparison. */
8828 if (INTVAL (v->mult_val) < 0)
8829 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8830 v->new_reg);
8831 else
8832 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8833 copy_rtx (v->add_val));
8835 /* Replace biv with the giv's reduced register. */
8836 update_reg_last_use (v->add_val, insn);
8837 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8838 return 1;
8840 /* Insn doesn't support that constant or invariant. Copy it
8841 into a register (it will be a loop invariant).  */
8842 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8844 loop_insn_emit_before (loop, 0, where_insn,
8845 gen_move_insn (tem,
8846 copy_rtx (v->add_val)));
8848 /* Substitute the new register for its invariant value in
8849 the compare expression. */
8850 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8851 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8852 return 1;
8855 #endif
8856 break;
8858 case COMPARE:
8859 case EQ: case NE:
8860 case GT: case GE: case GTU: case GEU:
8861 case LT: case LE: case LTU: case LEU:
8862 /* See if either argument is the biv. */
8863 if (XEXP (x, 0) == reg)
8864 arg = XEXP (x, 1), arg_operand = 1;
8865 else if (XEXP (x, 1) == reg)
8866 arg = XEXP (x, 0), arg_operand = 0;
8867 else
8868 break;
8870 if (CONSTANT_P (arg))
8872 /* First try to replace with any giv that has constant positive
8873 mult_val and constant add_val. We might be able to support
8874 negative mult_val, but it seems complex to do it in general. */
8876 for (v = bl->giv; v; v = v->next_iv)
8877 if (GET_CODE (v->mult_val) == CONST_INT
8878 && INTVAL (v->mult_val) > 0
8879 && (GET_CODE (v->add_val) == SYMBOL_REF
8880 || GET_CODE (v->add_val) == LABEL_REF
8881 || GET_CODE (v->add_val) == CONST
8882 || (GET_CODE (v->add_val) == REG
8883 && REG_POINTER (v->add_val)))
8884 && ! v->ignore && ! v->maybe_dead && v->always_computable
8885 && v->mode == mode)
8887 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8888 continue;
8890 /* Don't eliminate if the linear combination that makes up
8891 the giv overflows when it is applied to ARG. */
8892 if (GET_CODE (arg) == CONST_INT)
8894 rtx add_val;
8896 if (GET_CODE (v->add_val) == CONST_INT)
8897 add_val = v->add_val;
8898 else
8899 add_val = const0_rtx;
8901 if (const_mult_add_overflow_p (arg, v->mult_val,
8902 add_val, mode, 1))
8903 continue;
8906 if (! eliminate_p)
8907 return 1;
8909 /* Replace biv with the giv's reduced reg. */
8910 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8912 /* If all constants are actually constant integers and
8913 the derived constant can be directly placed in the COMPARE,
8914 do so. */
8915 if (GET_CODE (arg) == CONST_INT
8916 && GET_CODE (v->add_val) == CONST_INT)
8918 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8919 v->add_val, mode, 1);
8921 else
8923 /* Otherwise, load it into a register. */
8924 tem = gen_reg_rtx (mode);
8925 loop_iv_add_mult_emit_before (loop, arg,
8926 v->mult_val, v->add_val,
8927 tem, where_bb, where_insn);
8930 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8932 if (apply_change_group ())
8933 return 1;
8936 /* Look for giv with positive constant mult_val and nonconst add_val.
8937 Insert insns to calculate new compare value.
8938 ??? Turn this off due to possible overflow. */
8940 for (v = bl->giv; v; v = v->next_iv)
8941 if (GET_CODE (v->mult_val) == CONST_INT
8942 && INTVAL (v->mult_val) > 0
8943 && ! v->ignore && ! v->maybe_dead && v->always_computable
8944 && v->mode == mode
8945 && 0)
8947 rtx tem;
8949 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8950 continue;
8952 if (! eliminate_p)
8953 return 1;
8955 tem = gen_reg_rtx (mode);
8957 /* Replace biv with giv's reduced register. */
8958 validate_change (insn, &XEXP (x, 1 - arg_operand),
8959 v->new_reg, 1);
8961 /* Compute value to compare against. */
8962 loop_iv_add_mult_emit_before (loop, arg,
8963 v->mult_val, v->add_val,
8964 tem, where_bb, where_insn);
8965 /* Use it in this insn. */
8966 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8967 if (apply_change_group ())
8968 return 1;
8971 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8973 if (loop_invariant_p (loop, arg) == 1)
8975 /* Look for giv with constant positive mult_val and nonconst
8976 add_val. Insert insns to compute new compare value.
8977 ??? Turn this off due to possible overflow. */
8979 for (v = bl->giv; v; v = v->next_iv)
8980 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8981 && ! v->ignore && ! v->maybe_dead && v->always_computable
8982 && v->mode == mode
8983 && 0)
8985 rtx tem;
8987 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8988 continue;
8990 if (! eliminate_p)
8991 return 1;
8993 tem = gen_reg_rtx (mode);
8995 /* Replace biv with giv's reduced register. */
8996 validate_change (insn, &XEXP (x, 1 - arg_operand),
8997 v->new_reg, 1);
8999 /* Compute value to compare against. */
9000 loop_iv_add_mult_emit_before (loop, arg,
9001 v->mult_val, v->add_val,
9002 tem, where_bb, where_insn);
9003 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
9004 if (apply_change_group ())
9005 return 1;
9009 /* This code has problems.  Basically, when deciding whether we will
9010 eliminate BL, we can't know whether a particular giv
9011 of ARG will be reduced.  If it isn't going to be reduced,
9012 we can't eliminate BL.  We can try forcing it to be reduced,
9013 but that can generate poor code.
9015 The problem is that the benefit of reducing TV, below, should
9016 be increased if BL can actually be eliminated, but this means
9017 we might have to do a topological sort of the order in which
9018 we try to process bivs.  It doesn't seem worthwhile to do
9019 this sort of thing now. */
9021 #if 0
9022 /* Otherwise the reg compared with had better be a biv. */
9023 if (GET_CODE (arg) != REG
9024 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9025 return 0;
9027 /* Look for a pair of givs, one for each biv,
9028 with identical coefficients. */
9029 for (v = bl->giv; v; v = v->next_iv)
9031 struct induction *tv;
9033 if (v->ignore || v->maybe_dead || v->mode != mode)
9034 continue;
9036 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9037 tv = tv->next_iv)
9038 if (! tv->ignore && ! tv->maybe_dead
9039 && rtx_equal_p (tv->mult_val, v->mult_val)
9040 && rtx_equal_p (tv->add_val, v->add_val)
9041 && tv->mode == mode)
9043 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9044 continue;
9046 if (! eliminate_p)
9047 return 1;
9049 /* Replace biv with its giv's reduced reg. */
9050 XEXP (x, 1 - arg_operand) = v->new_reg;
9051 /* Replace other operand with the other giv's
9052 reduced reg. */
9053 XEXP (x, arg_operand) = tv->new_reg;
9054 return 1;
9057 #endif
9060 /* If we get here, the biv can't be eliminated. */
9061 return 0;
9063 case MEM:
9064 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9065 biv is used in it, since it will be replaced. */
9066 for (v = bl->giv; v; v = v->next_iv)
9067 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9068 return 1;
9069 break;
9071 default:
9072 break;
9075 /* See if any subexpression fails elimination. */
9076 fmt = GET_RTX_FORMAT (code);
9077 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9079 switch (fmt[i])
9081 case 'e':
9082 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9083 eliminate_p, where_bb, where_insn))
9084 return 0;
9085 break;
9087 case 'E':
9088 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9089 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9090 eliminate_p, where_bb, where_insn))
9091 return 0;
9092 break;
9096 return 1;
9099 /* Return nonzero if the last use of REG
9100 is in an insn following INSN in the same basic block. */
9102 static int
9103 last_use_this_basic_block (reg, insn)
9104 rtx reg;
9105 rtx insn;
9107 rtx n;
9108 for (n = insn;
9109 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9110 n = NEXT_INSN (n))
9112 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9113 return 1;
9115 return 0;
9118 /* Called via `note_stores' to record the initial value of a biv. Here we
9119 just record the location of the set and process it later. */
9121 static void
9122 record_initial (dest, set, data)
9123 rtx dest;
9124 rtx set;
9125 void *data ATTRIBUTE_UNUSED;
9127 struct loop_ivs *ivs = (struct loop_ivs *) data;
9128 struct iv_class *bl;
9130 if (GET_CODE (dest) != REG
9131 || REGNO (dest) >= ivs->n_regs
9132 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9133 return;
9135 bl = REG_IV_CLASS (ivs, REGNO (dest));
9137 /* If this is the first set found, record it. */
9138 if (bl->init_insn == 0)
9140 bl->init_insn = note_insn;
9141 bl->init_set = set;
9145 /* If any of the registers in X are "old" and currently have a last use earlier
9146 than INSN, update them to have a last use of INSN. Their actual last use
9147 will be the previous insn but it will not have a valid uid_luid so we can't
9148 use it. X must be a source expression only. */
9150 static void
9151 update_reg_last_use (x, insn)
9152 rtx x;
9153 rtx insn;
9155 /* Check for the case where INSN does not have a valid luid. In this case,
9156 there is no need to modify the regno_last_uid, as this can only happen
9157 when code is inserted after the loop_end to set a pseudo's final value,
9158 and hence this insn will never be the last use of x.
9159 ??? This comment is not correct.  See for example loop_givs_reduce.
9160 This may insert an insn before another new insn. */
9161 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9162 && INSN_UID (insn) < max_uid_for_loop
9163 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9165 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9167 else
9169 int i, j;
9170 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9171 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9173 if (fmt[i] == 'e')
9174 update_reg_last_use (XEXP (x, i), insn);
9175 else if (fmt[i] == 'E')
9176 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9177 update_reg_last_use (XVECEXP (x, i, j), insn);
9182 /* Given an insn INSN and condition COND, return the condition in a
9183 canonical form to simplify testing by callers. Specifically:
9185 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9186 (2) Both operands will be machine operands; (cc0) will have been replaced.
9187 (3) If an operand is a constant, it will be the second operand.
9188 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9189 for GE, GEU, and LEU.
9191 If the condition cannot be understood, or is an inequality floating-point
9192 comparison which needs to be reversed, 0 will be returned.
9194 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9196 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9197 insn used in locating the condition was found. If a replacement test
9198 of the condition is desired, it should be placed in front of that
9199 insn and we will be sure that the inputs are still valid.
9201 If WANT_REG is non-zero, we wish the condition to be relative to that
9202 register, if possible. Therefore, do not canonicalize the condition
9203 further. */
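/* Canonicalization sketch: (LE (reg X) (const_int 4)) becomes
   (LT (reg X) (const_int 5)), and (GT (const_int 0) (reg X)) is
   swapped to (LT (reg X) (const_int 0)) so the constant comes last.  */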
9205 rtx
9206 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9207 rtx insn;
9208 rtx cond;
9209 int reverse;
9210 rtx *earliest;
9211 rtx want_reg;
9213 enum rtx_code code;
9214 rtx prev = insn;
9215 rtx set;
9216 rtx tem;
9217 rtx op0, op1;
9218 int reverse_code = 0;
9219 enum machine_mode mode;
9221 code = GET_CODE (cond);
9222 mode = GET_MODE (cond);
9223 op0 = XEXP (cond, 0);
9224 op1 = XEXP (cond, 1);
9226 if (reverse)
9227 code = reversed_comparison_code (cond, insn);
9228 if (code == UNKNOWN)
9229 return 0;
9231 if (earliest)
9232 *earliest = insn;
9234 /* If we are comparing a register with zero, see if the register is set
9235 in the previous insn to a COMPARE or a comparison operation. Perform
9236 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9237 in cse.c */
9239 while (GET_RTX_CLASS (code) == '<'
9240 && op1 == CONST0_RTX (GET_MODE (op0))
9241 && op0 != want_reg)
9243 /* Set non-zero when we find something of interest. */
9244 rtx x = 0;
9246 #ifdef HAVE_cc0
9247 /* If comparison with cc0, import actual comparison from compare
9248 insn. */
9249 if (op0 == cc0_rtx)
9251 if ((prev = prev_nonnote_insn (prev)) == 0
9252 || GET_CODE (prev) != INSN
9253 || (set = single_set (prev)) == 0
9254 || SET_DEST (set) != cc0_rtx)
9255 return 0;
9257 op0 = SET_SRC (set);
9258 op1 = CONST0_RTX (GET_MODE (op0));
9259 if (earliest)
9260 *earliest = prev;
9262 #endif
9264 /* If this is a COMPARE, pick up the two things being compared. */
9265 if (GET_CODE (op0) == COMPARE)
9267 op1 = XEXP (op0, 1);
9268 op0 = XEXP (op0, 0);
9269 continue;
9271 else if (GET_CODE (op0) != REG)
9272 break;
9274 /* Go back to the previous insn. Stop if it is not an INSN. We also
9275 stop if it isn't a single set or if it has a REG_INC note because
9276 we don't want to bother dealing with it. */
9278 if ((prev = prev_nonnote_insn (prev)) == 0
9279 || GET_CODE (prev) != INSN
9280 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9281 break;
9283 set = set_of (op0, prev);
9285 if (set
9286 && (GET_CODE (set) != SET
9287 || !rtx_equal_p (SET_DEST (set), op0)))
9288 break;
9290 /* If this is setting OP0, get what it sets it to if it looks
9291 relevant. */
9292 if (set)
9294 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9296 /* ??? We may not combine comparisons done in a CCmode with
9297 comparisons not done in a CCmode. This is to aid targets
9298 like Alpha that have an IEEE compliant EQ instruction, and
9299 a non-IEEE compliant BEQ instruction. The use of CCmode is
9300 actually artificial, simply to prevent the combination, but
9301 should not affect other platforms.
9303 However, we must allow VOIDmode comparisons to match either
9304 CCmode or non-CCmode comparison, because some ports have
9305 modeless comparisons inside branch patterns.
9307 ??? This mode check should perhaps look more like the mode check
9308 in simplify_comparison in combine. */
9310 if ((GET_CODE (SET_SRC (set)) == COMPARE
9311 || (((code == NE
9312 || (code == LT
9313 && GET_MODE_CLASS (inner_mode) == MODE_INT
9314 && (GET_MODE_BITSIZE (inner_mode)
9315 <= HOST_BITS_PER_WIDE_INT)
9316 && (STORE_FLAG_VALUE
9317 & ((HOST_WIDE_INT) 1
9318 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9319 #ifdef FLOAT_STORE_FLAG_VALUE
9320 || (code == LT
9321 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9322 && (REAL_VALUE_NEGATIVE
9323 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9324 #endif
9326 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9327 && (((GET_MODE_CLASS (mode) == MODE_CC)
9328 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9329 || mode == VOIDmode || inner_mode == VOIDmode))
9330 x = SET_SRC (set);
9331 else if (((code == EQ
9332 || (code == GE
9333 && (GET_MODE_BITSIZE (inner_mode)
9334 <= HOST_BITS_PER_WIDE_INT)
9335 && GET_MODE_CLASS (inner_mode) == MODE_INT
9336 && (STORE_FLAG_VALUE
9337 & ((HOST_WIDE_INT) 1
9338 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9339 #ifdef FLOAT_STORE_FLAG_VALUE
9340 || (code == GE
9341 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9342 && (REAL_VALUE_NEGATIVE
9343 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9344 #endif
9346 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9347 && (((GET_MODE_CLASS (mode) == MODE_CC)
9348 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9349 || mode == VOIDmode || inner_mode == VOIDmode))
9352 reverse_code = 1;
9353 x = SET_SRC (set);
9355 else
9356 break;
9359 else if (reg_set_p (op0, prev))
9360 /* If this sets OP0, but not directly, we have to give up. */
9361 break;
9363 if (x)
9365 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9366 code = GET_CODE (x);
9367 if (reverse_code)
9369 code = reversed_comparison_code (x, prev);
9370 if (code == UNKNOWN)
9371 return 0;
9372 reverse_code = 0;
9375 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9376 if (earliest)
9377 *earliest = prev;
9381 /* If constant is first, put it last. */
9382 if (CONSTANT_P (op0))
9383 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9385 /* If OP0 is the result of a comparison, we weren't able to find what
9386 was really being compared, so fail. */
9387 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9388 return 0;
9390 /* Canonicalize any ordered comparison with integers involving equality
9391 if we can do computations in the relevant mode and we do not
9392 overflow. */
9394 if (GET_CODE (op1) == CONST_INT
9395 && GET_MODE (op0) != VOIDmode
9396 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9398 HOST_WIDE_INT const_val = INTVAL (op1);
9399 unsigned HOST_WIDE_INT uconst_val = const_val;
9400 unsigned HOST_WIDE_INT max_val
9401 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9403 switch (code)
9405 case LE:
9406 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9407 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9408 break;
9410 /* When cross-compiling, const_val might be sign-extended from
9411 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
9412 case GE:
9413 if ((HOST_WIDE_INT) (const_val & max_val)
9414 != (((HOST_WIDE_INT) 1
9415 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9416 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9417 break;
9419 case LEU:
9420 if (uconst_val < max_val)
9421 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9422 break;
9424 case GEU:
9425 if (uconst_val != 0)
9426 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9427 break;
9429 default:
9430 break;
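/* The guards in the cases above keep the adjusted constant representable
   in the operand's mode.  E.g. with invented operands,

       (leu (reg 60) (const_int 7))   becomes   (ltu (reg 60) (const_int 8)),

   but (leu (reg 60) (const_int -1)) in SImode compares against the
   maximal unsigned value, so adding 1 would wrap around and the
   comparison is left alone.  */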
9434 #ifdef HAVE_cc0
9435 /* Never return CC0; return zero instead. */
9436 if (op0 == cc0_rtx)
9437 return 0;
9438 #endif
9440 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9443 /* Given a jump insn JUMP, return the condition that will cause it to branch
9444 to its JUMP_LABEL. If the condition cannot be understood, or is an
9445 inequality floating-point comparison which needs to be reversed, 0 will
9446 be returned.
9448 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9449 insn used in locating the condition was found. If a replacement test
9450 of the condition is desired, it should be placed in front of that
9451 insn and we will be sure that the inputs are still valid. */
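/* The branch pattern examined below typically has this shape (label and
   register invented):

       (set (pc) (if_then_else (eq (reg 60) (const_int 0))
                               (label_ref 23)
                               (pc)))

   When the (label_ref ...) instead sits in the "else" arm, the jump is
   taken on the false condition, and REVERSE is passed on to
   canonicalize_condition.  */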
9454 get_condition (jump, earliest)
9455 rtx jump;
9456 rtx *earliest;
9458 rtx cond;
9459 int reverse;
9460 rtx set;
9462 /* If this is not a standard conditional jump, we can't parse it. */
9463 if (GET_CODE (jump) != JUMP_INSN
9464 || ! any_condjump_p (jump))
9465 return 0;
9466 set = pc_set (jump);
9468 cond = XEXP (SET_SRC (set), 0);
9470 /* If this branches to JUMP_LABEL when the condition is false, reverse
9471 the condition. */
9472 reverse
9473 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9474 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9476 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9479 /* Similar to the above routine, except that we also put an invariant last
9480 unless both operands are invariants. */
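/* A sketch with invented registers: if (reg 60) varies within the loop
   while (reg 70) is loop-invariant, then

       (gt (reg 70) (reg 60))   is returned as   (lt (reg 60) (reg 70)),

   so callers may assume that an invariant operand comes second.  */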
9483 get_condition_for_loop (loop, x)
9484 const struct loop *loop;
9485 rtx x;
9487 rtx comparison = get_condition (x, (rtx*) 0);
9489 if (comparison == 0
9490 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9491 || loop_invariant_p (loop, XEXP (comparison, 1)))
9492 return comparison;
9494 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9495 XEXP (comparison, 1), XEXP (comparison, 0));
9498 /* Scan the function and determine whether it has indirect (computed) jumps.
9500 This is taken mostly from flow.c; similar code exists elsewhere
9501 in the compiler. It may be useful to put this into rtlanal.c. */
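/* Such jumps arise, for example, from GNU C's computed goto (source
   sketch; names invented):

       void *targets[] = { &&lab_a, &&lab_b };
       goto *targets[i];

   which compiles to a jump through a register rather than to a fixed
   label.  */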
9502 static int
9503 indirect_jump_in_function_p (start)
9504 rtx start;
9506 rtx insn;
9508 for (insn = start; insn; insn = NEXT_INSN (insn))
9509 if (computed_jump_p (insn))
9510 return 1;
9512 return 0;
9515 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9516 documentation for LOOP_MEMS for the definition of `appropriate'.
9517 This function is called from prescan_loop via for_each_rtx. */
9519 static int
9520 insert_loop_mem (mem, data)
9521 rtx *mem;
9522 void *data ATTRIBUTE_UNUSED;
9524 struct loop_info *loop_info = data;
9525 int i;
9526 rtx m = *mem;
9528 if (m == NULL_RTX)
9529 return 0;
9531 switch (GET_CODE (m))
9533 case MEM:
9534 break;
9536 case CLOBBER:
9537 /* We're not interested in MEMs that are only clobbered. */
9538 return -1;
9540 case CONST_DOUBLE:
9541 /* We're not interested in the MEM associated with a
9542 CONST_DOUBLE, so there's no need to traverse into this. */
9543 return -1;
9545 case EXPR_LIST:
9546 /* We're not interested in any MEMs that only appear in notes. */
9547 return -1;
9549 default:
9550 /* This is not a MEM. */
9551 return 0;
9554 /* See if we've already seen this MEM. */
9555 for (i = 0; i < loop_info->mems_idx; ++i)
9556 if (rtx_equal_p (m, loop_info->mems[i].mem))
9558 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9559 /* The modes of the two memory accesses are different. If
9560 this happens, something tricky is going on, and we just
9561 don't optimize accesses to this MEM. */
9562 loop_info->mems[i].optimize = 0;
9564 return 0;
9567 /* Resize the array, if necessary. */
9568 if (loop_info->mems_idx == loop_info->mems_allocated)
9570 if (loop_info->mems_allocated != 0)
9571 loop_info->mems_allocated *= 2;
9572 else
9573 loop_info->mems_allocated = 32;
9575 loop_info->mems = (loop_mem_info *)
9576 xrealloc (loop_info->mems,
9577 loop_info->mems_allocated * sizeof (loop_mem_info));
9580 /* Actually insert the MEM. */
9581 loop_info->mems[loop_info->mems_idx].mem = m;
9582 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9583 because we can't put it in a register. We still store it in the
9584 table, though, so that if we see the same address later, but in a
9585 non-BLK mode, we'll not think we can optimize it at that point. */
9586 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9587 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9588 ++loop_info->mems_idx;
9590 return 0;
9594 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9596 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9597 register that is modified by an insn between FROM and TO. If the
9598 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9599 more, stop incrementing it, to avoid overflow.
9601 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9602 register I is used, if it is only used once. Otherwise, it is set
9603 to 0 (for no uses) or const0_rtx for more than one use. This
9604 parameter may be zero, in which case this processing is not done.
9606 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9607 optimize register I. */
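/* To illustrate the SINGLE_USAGE encoding with an invented pseudo: while
   (reg 150) is unused the field stays 0; its first use records that
   insn; any further use overwrites the field with const0_rtx, a value no
   insn can equal, marking "more than one use".  */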
9609 static void
9610 loop_regs_scan (loop, extra_size)
9611 const struct loop *loop;
9612 int extra_size;
9614 struct loop_regs *regs = LOOP_REGS (loop);
9615 int old_nregs;
9616 /* last_set[n] is nonzero iff reg n has been set in the current
9617 basic block. In that case, it is the insn that last set reg n. */
9618 rtx *last_set;
9619 rtx insn;
9620 int i;
9622 old_nregs = regs->num;
9623 regs->num = max_reg_num ();
9625 /* Grow the regs array if not allocated or too small. */
9626 if (regs->num >= regs->size)
9628 regs->size = regs->num + extra_size;
9630 regs->array = (struct loop_reg *)
9631 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9633 /* Zero the new elements. */
9634 memset (regs->array + old_nregs, 0,
9635 (regs->size - old_nregs) * sizeof (*regs->array));
9638 /* Clear previously scanned fields but do not clear n_times_set. */
9639 for (i = 0; i < old_nregs; i++)
9641 regs->array[i].set_in_loop = 0;
9642 regs->array[i].may_not_optimize = 0;
9643 regs->array[i].single_usage = NULL_RTX;
9646 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9648 /* Scan the loop, recording register usage. */
9649 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9650 insn = NEXT_INSN (insn))
9652 if (INSN_P (insn))
9654 /* Record registers that have exactly one use. */
9655 find_single_use_in_loop (regs, insn, PATTERN (insn));
9657 /* Include uses in REG_EQUAL notes. */
9658 if (REG_NOTES (insn))
9659 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9661 if (GET_CODE (PATTERN (insn)) == SET
9662 || GET_CODE (PATTERN (insn)) == CLOBBER)
9663 count_one_set (regs, insn, PATTERN (insn), last_set);
9664 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9666 int i;
9667 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9668 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9669 last_set);
9673 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9674 memset (last_set, 0, regs->num * sizeof (rtx));
9677 /* Invalidate all hard registers clobbered by calls. With one exception:
9678 a call-clobbered PIC register is still function-invariant for our
9679 purposes, since we can hoist any PIC calculations out of the loop.
9680 Thus the call to rtx_varies_p. */
9681 if (LOOP_INFO (loop)->has_call)
9682 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9683 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9684 && rtx_varies_p (regno_reg_rtx[i], 1))
9686 regs->array[i].may_not_optimize = 1;
9687 regs->array[i].set_in_loop = 1;
9690 #ifdef AVOID_CCMODE_COPIES
9691 /* Don't try to move insns which set CC registers if we should not
9692 create CCmode register copies. */
9693 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9694 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9695 regs->array[i].may_not_optimize = 1;
9696 #endif
9698 /* Set regs->array[I].n_times_set for the new registers. */
9699 for (i = old_nregs; i < regs->num; i++)
9700 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9702 free (last_set);
9705 /* Returns the number of real INSNs in the LOOP. */
9707 static int
9708 count_insns_in_loop (loop)
9709 const struct loop *loop;
9711 int count = 0;
9712 rtx insn;
9714 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9715 insn = NEXT_INSN (insn))
9716 if (INSN_P (insn))
9717 ++count;
9719 return count;
9722 /* Move MEMs into registers for the duration of the loop. */
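/* A sketch of the intended transformation, with invented RTL:

     before:                         after:
       loop:                           (set (reg 200) (mem X))
         ... use (mem X) ...           loop:
         (set (mem X) (expr))            ... use (reg 200) ...
       end                               (set (reg 200) (expr))
                                       end
                                       (set (mem X) (reg 200))

   The trailing store is emitted only when the MEM is written within the
   loop.  */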
9724 static void
9725 load_mems (loop)
9726 const struct loop *loop;
9728 struct loop_info *loop_info = LOOP_INFO (loop);
9729 struct loop_regs *regs = LOOP_REGS (loop);
9730 int maybe_never = 0;
9731 int i;
9732 rtx p, prev_ebb_head;
9733 rtx label = NULL_RTX;
9734 rtx end_label;
9735 /* Nonzero if the next instruction may never be executed. */
9736 int next_maybe_never = 0;
9737 unsigned int last_max_reg = max_reg_num ();
9739 if (loop_info->mems_idx == 0)
9740 return;
9742 /* We cannot use next_label here because it skips over normal insns. */
9743 end_label = next_nonnote_insn (loop->end);
9744 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9745 end_label = NULL_RTX;
9747 /* Check to see if it's possible that some instructions in the loop are
9748 never executed. Also check if there is a goto out of the loop other
9749 than right after the end of the loop. */
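/* A source-level sketch of why this matters:

       while (n--)
         if (p != 0)
           sum += *p;

   Here the load of *p may never be executed, so hoisting it in front of
   the loop could introduce a trap the original program never had; such
   MEMs are rejected below via MAYBE_NEVER and may_trap_p.  */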
9750 for (p = next_insn_in_loop (loop, loop->scan_start);
9751 p != NULL_RTX;
9752 p = next_insn_in_loop (loop, p))
9754 if (GET_CODE (p) == CODE_LABEL)
9755 maybe_never = 1;
9756 else if (GET_CODE (p) == JUMP_INSN
9757 /* If we enter the loop in the middle, and scan
9758 around to the beginning, don't set maybe_never
9759 for that. This must be an unconditional jump,
9760 otherwise the code at the top of the loop might
9761 never be executed. Unconditional jumps are
9762 followed by a barrier and then the loop end.  */
9763 && ! (GET_CODE (p) == JUMP_INSN
9764 && JUMP_LABEL (p) == loop->top
9765 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9766 && any_uncondjump_p (p)))
9768 /* If this is a jump outside of the loop but not right
9769 after the end of the loop, we would have to emit new fixup
9770 sequences for each such label. */
9771 if (/* If we can't tell where control might go when this
9772 JUMP_INSN is executed, we must be conservative. */
9773 !JUMP_LABEL (p)
9774 || (JUMP_LABEL (p) != end_label
9775 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9776 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9777 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9778 return;
9780 if (!any_condjump_p (p))
9781 /* Something complicated. */
9782 maybe_never = 1;
9783 else
9784 /* If there are any more instructions in the loop, they
9785 might not be reached. */
9786 next_maybe_never = 1;
9788 else if (next_maybe_never)
9789 maybe_never = 1;
9792 /* Find start of the extended basic block that enters the loop. */
9793 for (p = loop->start;
9794 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9795 p = PREV_INSN (p))
9797 prev_ebb_head = p;
9799 cselib_init ();
9801 /* Build table of mems that get set to constant values before the
9802 loop. */
9803 for (; p != loop->start; p = NEXT_INSN (p))
9804 cselib_process_insn (p);
9806 /* Actually move the MEMs. */
9807 for (i = 0; i < loop_info->mems_idx; ++i)
9809 regset_head load_copies;
9810 regset_head store_copies;
9811 int written = 0;
9812 rtx reg;
9813 rtx mem = loop_info->mems[i].mem;
9814 rtx mem_list_entry;
9816 if (MEM_VOLATILE_P (mem)
9817 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9818 /* There's no telling whether or not MEM is modified. */
9819 loop_info->mems[i].optimize = 0;
9821 /* Go through the MEMs written to in the loop to see if this
9822 one is aliased by one of them. */
9823 mem_list_entry = loop_info->store_mems;
9824 while (mem_list_entry)
9826 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9827 written = 1;
9828 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9829 mem, rtx_varies_p))
9831 /* MEM is indeed aliased by this store. */
9832 loop_info->mems[i].optimize = 0;
9833 break;
9835 mem_list_entry = XEXP (mem_list_entry, 1);
9838 if (flag_float_store && written
9839 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9840 loop_info->mems[i].optimize = 0;
9842 /* If this MEM is written to, we must be sure that there
9843 are no reads from another MEM that aliases this one. */
9844 if (loop_info->mems[i].optimize && written)
9846 int j;
9848 for (j = 0; j < loop_info->mems_idx; ++j)
9850 if (j == i)
9851 continue;
9852 else if (true_dependence (mem,
9853 VOIDmode,
9854 loop_info->mems[j].mem,
9855 rtx_varies_p))
9857 /* It's not safe to hoist loop_info->mems[i] out of
9858 the loop because writes to it might not be
9859 seen by reads from loop_info->mems[j]. */
9860 loop_info->mems[i].optimize = 0;
9861 break;
9866 if (maybe_never && may_trap_p (mem))
9867 /* We can't access the MEM outside the loop; it might
9868 cause a trap that wouldn't have happened otherwise. */
9869 loop_info->mems[i].optimize = 0;
9871 if (!loop_info->mems[i].optimize)
9872 /* We thought we were going to lift this MEM out of the
9873 loop, but later discovered that we could not. */
9874 continue;
9876 INIT_REG_SET (&load_copies);
9877 INIT_REG_SET (&store_copies);
9879 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9880 order to keep scan_loop from moving stores to this MEM
9881 out of the loop just because this REG is neither a
9882 user-variable nor used in the loop test. */
9883 reg = gen_reg_rtx (GET_MODE (mem));
9884 REG_USERVAR_P (reg) = 1;
9885 loop_info->mems[i].reg = reg;
9887 /* Now, replace all references to the MEM with the
9888 corresponding pseudos. */
9889 maybe_never = 0;
9890 for (p = next_insn_in_loop (loop, loop->scan_start);
9891 p != NULL_RTX;
9892 p = next_insn_in_loop (loop, p))
9894 if (INSN_P (p))
9896 rtx set;
9898 set = single_set (p);
9900 /* See if this copies the mem into a register that isn't
9901 modified afterwards. We'll try to do copy propagation
9902 a little further on. */
9903 if (set
9904 /* @@@ This test is _way_ too conservative. */
9905 && ! maybe_never
9906 && GET_CODE (SET_DEST (set)) == REG
9907 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9908 && REGNO (SET_DEST (set)) < last_max_reg
9909 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9910 && rtx_equal_p (SET_SRC (set), mem))
9911 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9913 /* See if this copies the mem from a register that isn't
9914 modified afterwards. We'll try to remove the
9915 redundant copy later on by doing a little register
9916 renaming and copy propagation. This will help
9917 to untangle things for the BIV detection code. */
9918 if (set
9919 && ! maybe_never
9920 && GET_CODE (SET_SRC (set)) == REG
9921 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9922 && REGNO (SET_SRC (set)) < last_max_reg
9923 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9924 && rtx_equal_p (SET_DEST (set), mem))
9925 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9927 /* If this is a call which uses / clobbers this memory
9928 location, we must not change the interface here. */
9929 if (GET_CODE (p) == CALL_INSN
9930 && reg_mentioned_p (loop_info->mems[i].mem,
9931 CALL_INSN_FUNCTION_USAGE (p)))
9933 cancel_changes (0);
9934 loop_info->mems[i].optimize = 0;
9935 break;
9937 else
9938 /* Replace the memory reference with the shadow register. */
9939 replace_loop_mems (p, loop_info->mems[i].mem,
9940 loop_info->mems[i].reg);
9943 if (GET_CODE (p) == CODE_LABEL
9944 || GET_CODE (p) == JUMP_INSN)
9945 maybe_never = 1;
9948 if (! loop_info->mems[i].optimize)
9949 ; /* We found we couldn't do the replacement, so do nothing. */
9950 else if (! apply_change_group ())
9951 /* We couldn't replace all occurrences of the MEM. */
9952 loop_info->mems[i].optimize = 0;
9953 else
9955 /* Load the memory immediately before LOOP->START, which is
9956 the NOTE_LOOP_BEG. */
9957 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9958 rtx set;
9959 rtx best = mem;
9960 int j;
9961 struct elt_loc_list *const_equiv = 0;
9963 if (e)
9965 struct elt_loc_list *equiv;
9966 struct elt_loc_list *best_equiv = 0;
9967 for (equiv = e->locs; equiv; equiv = equiv->next)
9969 if (CONSTANT_P (equiv->loc))
9970 const_equiv = equiv;
9971 else if (GET_CODE (equiv->loc) == REG
9972 /* Extending hard register lifetimes causes a crash
9973 on SRC targets. Doing so on non-SRC targets is
9974 probably not a good idea either, since we most
9975 likely have a pseudo register equivalence as
9976 well. */
9977 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9978 best_equiv = equiv;
9980 /* Use the constant equivalence if that is cheap enough. */
9981 if (! best_equiv)
9982 best_equiv = const_equiv;
9983 else if (const_equiv
9984 && (rtx_cost (const_equiv->loc, SET)
9985 <= rtx_cost (best_equiv->loc, SET)))
9987 best_equiv = const_equiv;
9988 const_equiv = 0;
9991 /* If best_equiv is nonzero, we know that MEM is set to a
9992 constant or register before the loop. We will use this
9993 knowledge to initialize the shadow register with that
9994 constant or reg rather than by loading from MEM. */
9995 if (best_equiv)
9996 best = copy_rtx (best_equiv->loc);
9999 set = gen_move_insn (reg, best);
10000 set = loop_insn_hoist (loop, set);
10001 if (REG_P (best))
10003 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10004 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10006 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10007 break;
10011 if (const_equiv)
10012 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10014 if (written)
10016 if (label == NULL_RTX)
10018 label = gen_label_rtx ();
10019 emit_label_after (label, loop->end);
10022 /* Store the memory immediately after END, which is
10023 the NOTE_LOOP_END. */
10024 set = gen_move_insn (copy_rtx (mem), reg);
10025 loop_insn_emit_after (loop, 0, label, set);
10028 if (loop_dump_stream)
10030 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10031 REGNO (reg), (written ? "r/w" : "r/o"));
10032 print_rtl (loop_dump_stream, mem);
10033 fputc ('\n', loop_dump_stream);
10036 /* Attempt a bit of copy propagation. This helps untangle the
10037 data flow, and enables {basic,general}_induction_var to find
10038 more bivs/givs. */
10039 EXECUTE_IF_SET_IN_REG_SET
10040 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10042 try_copy_prop (loop, reg, j);
10044 CLEAR_REG_SET (&load_copies);
10046 EXECUTE_IF_SET_IN_REG_SET
10047 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10049 try_swap_copy_prop (loop, reg, j);
10051 CLEAR_REG_SET (&store_copies);
10055 if (label != NULL_RTX && end_label != NULL_RTX)
10057 /* Now, we need to replace all references to the previous exit
10058 label with the new one. */
10059 rtx_pair rr;
10060 rr.r1 = end_label;
10061 rr.r2 = label;
10063 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10065 for_each_rtx (&p, replace_label, &rr);
10067 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10068 field. This is not handled by for_each_rtx because it doesn't
10069 handle unprinted ('0') fields. We need to update JUMP_LABEL
10070 because the immediately following unroll pass will use it.
10071 replace_label would not work anyway, because it only handles
10072 LABEL_REFs. */
10073 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10074 JUMP_LABEL (p) = label;
10078 cselib_finish ();
10081 /* For communication between note_reg_stored and its caller. */
10082 struct note_reg_stored_arg
10084 int set_seen;
10085 rtx reg;
10088 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10089 is equal to ARG. */
10090 static void
10091 note_reg_stored (x, setter, arg)
10092 rtx x, setter ATTRIBUTE_UNUSED;
10093 void *arg;
10095 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10096 if (t->reg == x)
10097 t->set_seen = 1;
10100 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10101 There must be exactly one insn that sets this pseudo; it will be
10102 deleted if all replacements succeed and we can prove that the register
10103 is not used after the loop. */
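/* A sketch with invented pseudos, with (reg 150) as REGNO and (reg 200)
   as REPLACEMENT:

       (set (reg 150) (reg 200))     ; the single initializing insn
       ... uses of (reg 150) ...     ; each becomes a use of (reg 200)

   If every use is replaced and (reg 150) provably dies with the loop,
   the initializing insn itself is deleted.  */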
10105 static void
10106 try_copy_prop (loop, replacement, regno)
10107 const struct loop *loop;
10108 rtx replacement;
10109 unsigned int regno;
10111 /* This is the reg that we are copying from. */
10112 rtx reg_rtx = regno_reg_rtx[regno];
10113 rtx init_insn = 0;
10114 rtx insn;
10115 /* These help keep track of whether we replaced all uses of the reg. */
10116 int replaced_last = 0;
10117 int store_is_first = 0;
10119 for (insn = next_insn_in_loop (loop, loop->scan_start);
10120 insn != NULL_RTX;
10121 insn = next_insn_in_loop (loop, insn))
10123 rtx set;
10125 /* Only substitute within one extended basic block from the initializing
10126 insn. */
10127 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10128 break;
10130 if (! INSN_P (insn))
10131 continue;
10133 /* Is this the initializing insn? */
10134 set = single_set (insn);
10135 if (set
10136 && GET_CODE (SET_DEST (set)) == REG
10137 && REGNO (SET_DEST (set)) == regno)
10139 if (init_insn)
10140 abort ();
10142 init_insn = insn;
10143 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10144 store_is_first = 1;
10147 /* Only substitute after seeing the initializing insn. */
10148 if (init_insn && insn != init_insn)
10150 struct note_reg_stored_arg arg;
10152 replace_loop_regs (insn, reg_rtx, replacement);
10153 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10154 replaced_last = 1;
10156 /* Stop replacing when REPLACEMENT is modified. */
10157 arg.reg = replacement;
10158 arg.set_seen = 0;
10159 note_stores (PATTERN (insn), note_reg_stored, &arg);
10160 if (arg.set_seen)
10162 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10164 /* It is possible that we've turned a previously valid REG_EQUAL note
10165 into an invalid one: we changed REGNO to REPLACEMENT in it, and since
10166 this insn modifies REPLACEMENT (unlike REGNO), the note's meaning changes. */
10167 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10168 remove_note (insn, note);
10169 break;
10173 if (! init_insn)
10174 abort ();
10175 if (apply_change_group ())
10177 if (loop_dump_stream)
10178 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10179 if (store_is_first && replaced_last)
10181 rtx first;
10182 rtx retval_note;
10184 /* Assume we're just deleting INIT_INSN. */
10185 first = init_insn;
10186 /* Look for REG_RETVAL note. If we're deleting the end of
10187 the libcall sequence, the whole sequence can go. */
10188 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10189 /* If we found a REG_RETVAL note, find the first instruction
10190 in the sequence. */
10191 if (retval_note)
10192 first = XEXP (retval_note, 0);
10194 /* Delete the instructions. */
10195 loop_delete_insns (first, init_insn);
10197 if (loop_dump_stream)
10198 fprintf (loop_dump_stream, ".\n");
10202 /* Replace all the instructions from FIRST up to and including LAST
10203 with NOTE_INSN_DELETED notes. */
10205 static void
10206 loop_delete_insns (first, last)
10207 rtx first;
10208 rtx last;
10210 while (1)
10212 if (loop_dump_stream)
10213 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10214 INSN_UID (first));
10215 delete_insn (first);
10217 /* If this was the LAST instruction we're supposed to delete,
10218 we're done. */
10219 if (first == last)
10220 break;
10222 first = NEXT_INSN (first);
10226 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10227 loop LOOP if the order of the sets of these registers can be
10228 swapped. There must be exactly one insn within the loop that sets
10229 this pseudo followed immediately by a move insn that sets
10230 REPLACEMENT with REGNO. */
10231 static void
10232 try_swap_copy_prop (loop, replacement, regno)
10233 const struct loop *loop;
10234 rtx replacement;
10235 unsigned int regno;
10237 rtx insn;
10238 rtx set = NULL_RTX;
10239 unsigned int new_regno;
10241 new_regno = REGNO (replacement);
10243 for (insn = next_insn_in_loop (loop, loop->scan_start);
10244 insn != NULL_RTX;
10245 insn = next_insn_in_loop (loop, insn))
10247 /* Search for the insn that copies REGNO to NEW_REGNO. */
10248 if (INSN_P (insn)
10249 && (set = single_set (insn))
10250 && GET_CODE (SET_DEST (set)) == REG
10251 && REGNO (SET_DEST (set)) == new_regno
10252 && GET_CODE (SET_SRC (set)) == REG
10253 && REGNO (SET_SRC (set)) == regno)
10254 break;
10257 if (insn != NULL_RTX)
10259 rtx prev_insn;
10260 rtx prev_set;
10262 /* Some DEF-USE info would come in handy here to make this
10263 function more general. For now, just check the previous insn
10264 which is the most likely candidate for setting REGNO. */
10266 prev_insn = PREV_INSN (insn);
10268 if (INSN_P (prev_insn)
10269 && (prev_set = single_set (prev_insn))
10270 && GET_CODE (SET_DEST (prev_set)) == REG
10271 && REGNO (SET_DEST (prev_set)) == regno)
10273 /* We have:
10274 (set (reg regno) (expr))
10275 (set (reg new_regno) (reg regno))
10277 so try converting this to:
10278 (set (reg new_regno) (expr))
10279 (set (reg regno) (reg new_regno))
10281 The former construct is often generated when a global
10282 variable used for an induction variable is shadowed by a
10283 register (NEW_REGNO). The latter construct improves the
10284 chances of GIV replacement and BIV elimination. */
10286 validate_change (prev_insn, &SET_DEST (prev_set),
10287 replacement, 1);
10288 validate_change (insn, &SET_DEST (set),
10289 SET_SRC (set), 1);
10290 validate_change (insn, &SET_SRC (set),
10291 replacement, 1);
10293 if (apply_change_group ())
10295 if (loop_dump_stream)
10296 fprintf (loop_dump_stream,
10297 " Swapped set of reg %d at %d with reg %d at %d.\n",
10298 regno, INSN_UID (insn),
10299 new_regno, INSN_UID (prev_insn));
10301 /* Update first use of REGNO. */
10302 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10303 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10305 /* Now perform copy propagation to hopefully
10306 remove all uses of REGNO within the loop. */
10307 try_copy_prop (loop, replacement, regno);
10313 /* Replace MEM with its associated pseudo register. This function is
10314 called from load_mems via for_each_rtx. DATA is actually a pointer
10315 to a structure describing the instruction currently being scanned
10316 and the MEM we are currently replacing. */
10318 static int
10319 replace_loop_mem (mem, data)
10320 rtx *mem;
10321 void *data;
10323 loop_replace_args *args = (loop_replace_args *) data;
10324 rtx m = *mem;
10326 if (m == NULL_RTX)
10327 return 0;
10329 switch (GET_CODE (m))
10331 case MEM:
10332 break;
10334 case CONST_DOUBLE:
10335 /* We're not interested in the MEM associated with a
10336 CONST_DOUBLE, so there's no need to traverse into one. */
10337 return -1;
10339 default:
10340 /* This is not a MEM. */
10341 return 0;
10344 if (!rtx_equal_p (args->match, m))
10345 /* This is not the MEM we are currently replacing. */
10346 return 0;
10348 /* Actually replace the MEM. */
10349 validate_change (args->insn, mem, args->replacement, 1);
10351 return 0;
10354 static void
10355 replace_loop_mems (insn, mem, reg)
10356 rtx insn;
10357 rtx mem;
10358 rtx reg;
10360 loop_replace_args args;
10362 args.insn = insn;
10363 args.match = mem;
10364 args.replacement = reg;
10366 for_each_rtx (&insn, replace_loop_mem, &args);
10369 /* Replace one register with another. Called through for_each_rtx; PX points
10370 to the rtx being scanned. DATA is actually a pointer to
10371 a structure of arguments. */
10373 static int
10374 replace_loop_reg (px, data)
10375 rtx *px;
10376 void *data;
10378 rtx x = *px;
10379 loop_replace_args *args = (loop_replace_args *) data;
10381 if (x == NULL_RTX)
10382 return 0;
10384 if (x == args->match)
10385 validate_change (args->insn, px, args->replacement, 1);
10387 return 0;
10390 static void
10391 replace_loop_regs (insn, reg, replacement)
10392 rtx insn;
10393 rtx reg;
10394 rtx replacement;
10396 loop_replace_args args;
10398 args.insn = insn;
10399 args.match = reg;
10400 args.replacement = replacement;
10402 for_each_rtx (&insn, replace_loop_reg, &args);
10405 /* Replace occurrences of the old exit label for the loop with the new
10406 one. DATA is an rtx_pair containing the old and new labels,
10407 respectively. */
10409 static int
10410 replace_label (x, data)
10411 rtx *x;
10412 void *data;
10414 rtx l = *x;
10415 rtx old_label = ((rtx_pair *) data)->r1;
10416 rtx new_label = ((rtx_pair *) data)->r2;
10418 if (l == NULL_RTX)
10419 return 0;
10421 if (GET_CODE (l) != LABEL_REF)
10422 return 0;
10424 if (XEXP (l, 0) != old_label)
10425 return 0;
10427 XEXP (l, 0) = new_label;
10428 ++LABEL_NUSES (new_label);
10429 --LABEL_NUSES (old_label);
10431 return 0;
10434 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10435 (ignored in the interim). */
10437 static rtx
10438 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10439 const struct loop *loop ATTRIBUTE_UNUSED;
10440 basic_block where_bb ATTRIBUTE_UNUSED;
10441 rtx where_insn;
10442 rtx pattern;
10444 return emit_insn_after (pattern, where_insn);
10448 /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN
10449 in basic block WHERE_BB (ignored in the interim) within the loop;
10450 otherwise hoist PATTERN into the loop pre-header. */
10453 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10454 const struct loop *loop;
10455 basic_block where_bb ATTRIBUTE_UNUSED;
10456 rtx where_insn;
10457 rtx pattern;
10459 if (! where_insn)
10460 return loop_insn_hoist (loop, pattern);
10461 return emit_insn_before (pattern, where_insn);
10465 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10466 WHERE_BB (ignored in the interim) within the loop. */
10468 static rtx
10469 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10470 const struct loop *loop ATTRIBUTE_UNUSED;
10471 basic_block where_bb ATTRIBUTE_UNUSED;
10472 rtx where_insn;
10473 rtx pattern;
10475 return emit_call_insn_before (pattern, where_insn);
10479 /* Hoist insn for PATTERN into the loop pre-header. */
10482 loop_insn_hoist (loop, pattern)
10483 const struct loop *loop;
10484 rtx pattern;
10486 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10490 /* Hoist call insn for PATTERN into the loop pre-header. */
10492 static rtx
10493 loop_call_insn_hoist (loop, pattern)
10494 const struct loop *loop;
10495 rtx pattern;
10497 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10501 /* Sink insn for PATTERN after the loop end. */
10504 loop_insn_sink (loop, pattern)
10505 const struct loop *loop;
10506 rtx pattern;
10508 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10511 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10512 and a constant. Emit a sequence of instructions to load it into REG. */
10513 static rtx
10514 gen_load_of_final_value (reg, final_value)
10515 rtx reg, final_value;
10517 rtx seq;
10518 start_sequence ();
10519 final_value = force_operand (final_value, reg);
10520 if (final_value != reg)
10521 emit_move_insn (reg, final_value);
10522 seq = get_insns ();
10523 end_sequence ();
10524 return seq;
10527 /* If the loop has multiple exits, emit insn for PATTERN before the
10528 loop to ensure that it will always be executed no matter how the
10529 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10530 since this is slightly more efficient. */
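/* E.g. an insn that computes a value needed after the loop must execute
   no matter which exit is taken; when extra exit labels exist
   (EXIT_COUNT is nonzero) the insn is hoisted in front of the loop,
   while a loop that only exits through its end can take the cheaper
   route of sinking it after the loop.  */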
10532 static rtx
10533 loop_insn_sink_or_swim (loop, pattern)
10534 const struct loop *loop;
10535 rtx pattern;
10537 if (loop->exit_count)
10538 return loop_insn_hoist (loop, pattern);
10539 else
10540 return loop_insn_sink (loop, pattern);
10543 static void
10544 loop_ivs_dump (loop, file, verbose)
10545 const struct loop *loop;
10546 FILE *file;
10547 int verbose;
10549 struct iv_class *bl;
10550 int iv_num = 0;
10552 if (! loop || ! file)
10553 return;
10555 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10556 iv_num++;
10558 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10560 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10562 loop_iv_class_dump (bl, file, verbose);
10563 fputc ('\n', file);
10568 static void
10569 loop_iv_class_dump (bl, file, verbose)
10570 const struct iv_class *bl;
10571 FILE *file;
10572 int verbose ATTRIBUTE_UNUSED;
10574 struct induction *v;
10575 rtx incr;
10576 int i;
10578 if (! bl || ! file)
10579 return;
10581 fprintf (file, "IV class for reg %d, benefit %d\n",
10582 bl->regno, bl->total_benefit);
10584 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10585 if (bl->initial_value)
10587 fprintf (file, ", init val: ");
10588 print_simple_rtl (file, bl->initial_value);
10590 if (bl->initial_test)
10592 fprintf (file, ", init test: ");
10593 print_simple_rtl (file, bl->initial_test);
10595 fputc ('\n', file);
10597 if (bl->final_value)
10599 fprintf (file, " Final val: ");
10600 print_simple_rtl (file, bl->final_value);
10601 fputc ('\n', file);
10604 if ((incr = biv_total_increment (bl)))
10606 fprintf (file, " Total increment: ");
10607 print_simple_rtl (file, incr);
10608 fputc ('\n', file);
10611 /* List the increments. */
10612 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10614 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10615 print_simple_rtl (file, v->add_val);
10616 fputc ('\n', file);
10619 /* List the givs. */
10620 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10622 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10623 i, INSN_UID (v->insn), v->benefit);
10624 if (v->giv_type == DEST_ADDR)
10625 print_simple_rtl (file, v->mem);
10626 else
10627 print_simple_rtl (file, single_set (v->insn));
10628 fputc ('\n', file);
10633 static void
10634 loop_biv_dump (v, file, verbose)
10635 const struct induction *v;
10636 FILE *file;
10637 int verbose;
10639 if (! v || ! file)
10640 return;
10642 fprintf (file,
10643 "Biv %d: insn %d",
10644 REGNO (v->dest_reg), INSN_UID (v->insn));
10645 fprintf (file, " const ");
10646 print_simple_rtl (file, v->add_val);
10648 if (verbose && v->final_value)
10650 fputc ('\n', file);
10651 fprintf (file, " final ");
10652 print_simple_rtl (file, v->final_value);
10655 fputc ('\n', file);
10659 static void
10660 loop_giv_dump (v, file, verbose)
10661 const struct induction *v;
10662 FILE *file;
10663 int verbose;
10665 if (! v || ! file)
10666 return;
10668 if (v->giv_type == DEST_REG)
10669 fprintf (file, "Giv %d: insn %d",
10670 REGNO (v->dest_reg), INSN_UID (v->insn));
10671 else
10672 fprintf (file, "Dest address: insn %d",
10673 INSN_UID (v->insn));
10675 fprintf (file, " src reg %d benefit %d",
10676 REGNO (v->src_reg), v->benefit);
10677 fprintf (file, " lifetime %d",
10678 v->lifetime);
10680 if (v->replaceable)
10681 fprintf (file, " replaceable");
10683 if (v->no_const_addval)
10684 fprintf (file, " ncav");
10686 if (v->ext_dependent)
10688 switch (GET_CODE (v->ext_dependent))
10690 case SIGN_EXTEND:
10691 fprintf (file, " ext se");
10692 break;
10693 case ZERO_EXTEND:
10694 fprintf (file, " ext ze");
10695 break;
10696 case TRUNCATE:
10697 fprintf (file, " ext tr");
10698 break;
10699 default:
10700 abort ();
10704 fputc ('\n', file);
10705 fprintf (file, " mult ");
10706 print_simple_rtl (file, v->mult_val);
10708 fputc ('\n', file);
10709 fprintf (file, " add ");
10710 print_simple_rtl (file, v->add_val);
10712 if (verbose && v->final_value)
10714 fputc ('\n', file);
10715 fprintf (file, " final ");
10716 print_simple_rtl (file, v->final_value);
10719 fputc ('\n', file);
10723 void
10724 debug_ivs (loop)
10725 const struct loop *loop;
10727 loop_ivs_dump (loop, stderr, 1);
10731 void
10732 debug_iv_class (bl)
10733 const struct iv_class *bl;
10735 loop_iv_class_dump (bl, stderr, 1);
10739 void
10740 debug_biv (v)
10741 const struct induction *v;
10743 loop_biv_dump (v, stderr, 1);
10747 void
10748 debug_giv (v)
10749 const struct induction *v;
10751 loop_giv_dump (v, stderr, 1);
10755 #define LOOP_BLOCK_NUM_1(INSN) \
10756 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10758 /* The notes do not have an assigned block, so look at the next insn. */
10759 #define LOOP_BLOCK_NUM(INSN) \
10760 ((INSN) ? (GET_CODE (INSN) == NOTE \
10761 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10762 : LOOP_BLOCK_NUM_1 (INSN)) \
10763 : -1)
10765 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10767 static void
10768 loop_dump_aux (loop, file, verbose)
10769 const struct loop *loop;
10770 FILE *file;
10771 int verbose ATTRIBUTE_UNUSED;
10773 rtx label;
10775 if (! loop || ! file)
10776 return;
10778 /* Print diagnostics to compare our concept of a loop with
10779 what the loop notes say. */
10780 if (! PREV_INSN (loop->first->head)
10781 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10782 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10783 != NOTE_INSN_LOOP_BEG)
10784 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10785 INSN_UID (PREV_INSN (loop->first->head)));
10786 if (! NEXT_INSN (loop->last->end)
10787 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10788 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10789 != NOTE_INSN_LOOP_END)
10790 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10791 INSN_UID (NEXT_INSN (loop->last->end)));
10793 if (loop->start)
10795 fprintf (file,
10796 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10797 LOOP_BLOCK_NUM (loop->start),
10798 LOOP_INSN_UID (loop->start),
10799 LOOP_BLOCK_NUM (loop->cont),
10800 LOOP_INSN_UID (loop->cont),
10801 LOOP_BLOCK_NUM (loop->cont),
10802 LOOP_INSN_UID (loop->cont),
10803 LOOP_BLOCK_NUM (loop->vtop),
10804 LOOP_INSN_UID (loop->vtop),
10805 LOOP_BLOCK_NUM (loop->end),
10806 LOOP_INSN_UID (loop->end));
10807 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10808 LOOP_BLOCK_NUM (loop->top),
10809 LOOP_INSN_UID (loop->top),
10810 LOOP_BLOCK_NUM (loop->scan_start),
10811 LOOP_INSN_UID (loop->scan_start));
10812 fprintf (file, ";; exit_count %d", loop->exit_count);
10813 if (loop->exit_count)
10815 fputs (", labels:", file);
10816 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10818 fprintf (file, " %d ",
10819 LOOP_INSN_UID (XEXP (label, 0)));
10822 fputs ("\n", file);
10824 /* This can happen when a marked loop appears as two nested loops,
10825 say from while (a || b) {}. The inner loop won't match
10826 the loop markers but the outer one will. */
10827 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10828 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10832 /* Call this function from the debugger to dump LOOP. */
10834 void
10835 debug_loop (loop)
10836 const struct loop *loop;
10838 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10841 /* Call this function from the debugger to dump LOOPS. */
10843 void
10844 debug_loops (loops)
10845 const struct loops *loops;
10847 flow_loops_dump (loops, stderr, loop_dump_aux, 1);