1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
37 #include "config.h"
38 #include "system.h"
39 #include "coretypes.h"
40 #include "tm.h"
41 #include "rtl.h"
42 #include "tm_p.h"
43 #include "function.h"
44 #include "expr.h"
45 #include "hard-reg-set.h"
46 #include "basic-block.h"
47 #include "insn-config.h"
48 #include "regs.h"
49 #include "recog.h"
50 #include "flags.h"
51 #include "real.h"
52 #include "loop.h"
53 #include "cselib.h"
54 #include "except.h"
55 #include "toplev.h"
56 #include "predict.h"
57 #include "insn-flags.h"
58 #include "optabs.h"
59 #include "cfgloop.h"
61 /* Not really meaningful values, but at least something. */
62 #ifndef SIMULTANEOUS_PREFETCHES
63 #define SIMULTANEOUS_PREFETCHES 3
64 #endif
65 #ifndef PREFETCH_BLOCK
66 #define PREFETCH_BLOCK 32
67 #endif
68 #ifndef HAVE_prefetch
69 #define HAVE_prefetch 0
70 #define CODE_FOR_prefetch 0
71 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
72 #endif
74 /* Give up the prefetch optimizations once we exceed a given threshold.
75 It is unlikely that we would be able to optimize something in a loop
76 with so many detected prefetches. */
77 #define MAX_PREFETCHES 100
78 /* The number of prefetch blocks that are beneficial to fetch at once before
79 a loop with a known (and low) iteration count. */
80 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
81 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
82 since it is likely that the data are already in the cache. */
83 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
85 /* Parameterize some prefetch heuristics so they can be turned on and off
86 easily for performance testing on new architectures. These can be
87 defined in target-dependent files. */
89 /* Prefetch is worthwhile only when loads/stores are dense. */
90 #ifndef PREFETCH_ONLY_DENSE_MEM
91 #define PREFETCH_ONLY_DENSE_MEM 1
92 #endif
94 /* Define what we mean by "dense" loads and stores; this value divided by 256
95 is the minimum fraction of memory references that are worth prefetching. */
96 #ifndef PREFETCH_DENSE_MEM
97 #define PREFETCH_DENSE_MEM 220
98 #endif
100 /* Do not prefetch for a loop whose iteration count is known to be low. */
101 #ifndef PREFETCH_NO_LOW_LOOPCNT
102 #define PREFETCH_NO_LOW_LOOPCNT 1
103 #endif
105 /* Define what we mean by a "low" iteration count. */
106 #ifndef PREFETCH_LOW_LOOPCNT
107 #define PREFETCH_LOW_LOOPCNT 32
108 #endif
110 /* Do not prefetch for a loop that contains a function call; such a loop is
111 probably not an internal loop. */
112 #ifndef PREFETCH_NO_CALL
113 #define PREFETCH_NO_CALL 1
114 #endif
116 /* Do not prefetch accesses with an extreme stride. */
117 #ifndef PREFETCH_NO_EXTREME_STRIDE
118 #define PREFETCH_NO_EXTREME_STRIDE 1
119 #endif
121 /* Define what we mean by an "extreme" stride. */
122 #ifndef PREFETCH_EXTREME_STRIDE
123 #define PREFETCH_EXTREME_STRIDE 4096
124 #endif
126 /* Define a limit to how far apart indices can be and still be merged
127 into a single prefetch. */
128 #ifndef PREFETCH_EXTREME_DIFFERENCE
129 #define PREFETCH_EXTREME_DIFFERENCE 4096
130 #endif
132 /* Issue prefetch instructions before the loop to fetch data to be used
133 in the first few loop iterations. */
134 #ifndef PREFETCH_BEFORE_LOOP
135 #define PREFETCH_BEFORE_LOOP 1
136 #endif
138 /* Do not handle reversed order prefetches (negative stride). */
139 #ifndef PREFETCH_NO_REVERSE_ORDER
140 #define PREFETCH_NO_REVERSE_ORDER 1
141 #endif
143 /* Prefetch even if the GIV is in conditional code. */
144 #ifndef PREFETCH_CONDITIONAL
145 #define PREFETCH_CONDITIONAL 1
146 #endif
148 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
149 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
151 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
152 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
153 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
155 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
156 ((REGNO) < FIRST_PSEUDO_REGISTER \
157 ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
160 /* Vector mapping INSN_UIDs to luids.
161 The luids are like uids, but they always increase monotonically.
162 We use them to see whether a jump comes from outside a given loop. */
164 int *uid_luid;
166 /* Indexed by INSN_UID, contains a pointer to the (innermost) loop
167 that the insn is contained in. */
169 struct loop **uid_loop;
171 /* 1 + largest uid of any insn. */
173 int max_uid_for_loop;
175 /* Number of loops detected in current function. Used as index to the
176 next few tables. */
178 static int max_loop_num;
180 /* Bound on pseudo register number before loop optimization.
181 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
182 unsigned int max_reg_before_loop;
184 /* The value to pass to the next call of reg_scan_update. */
185 static int loop_max_reg;
187 /* During the analysis of a loop, a chain of `struct movable's
188 is made to record all the movable insns found.
189 Then the entire chain can be scanned to decide which to move. */
191 struct movable
193 rtx insn; /* A movable insn */
194 rtx set_src; /* The expression this reg is set from. */
195 rtx set_dest; /* The destination of this SET. */
196 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
197 of any registers used within the LIBCALL. */
198 int consec; /* Number of consecutive following insns
199 that must be moved with this one. */
200 unsigned int regno; /* The register it sets */
201 short lifetime; /* lifetime of that register;
202 may be adjusted when matching movables
203 that load the same value are found. */
204 short savings; /* Number of insns we can move for this reg,
205 including other movables that force this
206 or match this one. */
207 unsigned int cond : 1; /* 1 if only conditionally movable */
208 unsigned int force : 1; /* 1 means MUST move this insn */
209 unsigned int global : 1; /* 1 means reg is live outside this loop */
210 /* If PARTIAL is 1, GLOBAL means something different:
211 that the reg is live outside the range from where it is set
212 to the following label. */
213 unsigned int done : 1; /* 1 inhibits further processing of this */
215 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
216 In particular, moving it does not make it
217 invariant. */
218 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
219 load SRC, rather than copying INSN. */
220 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
221 first insn of a consecutive sets group. */
222 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
223 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
224 that we should avoid changing when clearing
225 the rest of the reg. */
226 struct movable *match; /* First entry for same value */
227 struct movable *forces; /* An insn that must be moved if this is */
228 struct movable *next;
232 FILE *loop_dump_stream;
234 /* Forward declarations. */
236 static void invalidate_loops_containing_label PARAMS ((rtx));
237 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
238 static void mark_loop_jump PARAMS ((rtx, struct loop *));
239 static void prescan_loop PARAMS ((struct loop *));
240 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
241 static int consec_sets_invariant_p PARAMS ((const struct loop *,
242 rtx, int, rtx));
243 static int labels_in_range_p PARAMS ((rtx, int));
244 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
245 static void note_addr_stored PARAMS ((rtx, rtx, void *));
246 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
247 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
248 static void scan_loop PARAMS ((struct loop*, int));
249 #if 0
250 static void replace_call_address PARAMS ((rtx, rtx, rtx));
251 #endif
252 static rtx skip_consec_insns PARAMS ((rtx, int));
253 static int libcall_benefit PARAMS ((rtx));
254 static void ignore_some_movables PARAMS ((struct loop_movables *));
255 static void force_movables PARAMS ((struct loop_movables *));
256 static void combine_movables PARAMS ((struct loop_movables *,
257 struct loop_regs *));
258 static int num_unmoved_movables PARAMS ((const struct loop *));
259 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
260 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
261 struct loop_regs *));
262 static void add_label_notes PARAMS ((rtx, rtx));
263 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
264 int, int));
265 static void loop_movables_add PARAMS((struct loop_movables *,
266 struct movable *));
267 static void loop_movables_free PARAMS((struct loop_movables *));
268 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
269 static void loop_bivs_find PARAMS((struct loop *));
270 static void loop_bivs_init_find PARAMS((struct loop *));
271 static void loop_bivs_check PARAMS((struct loop *));
272 static void loop_givs_find PARAMS((struct loop *));
273 static void loop_givs_check PARAMS((struct loop *));
274 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
275 int, int));
276 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
277 struct induction *, rtx));
278 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
279 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
280 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
281 rtx *));
282 static void loop_ivs_free PARAMS((struct loop *));
283 static void strength_reduce PARAMS ((struct loop *, int));
284 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
285 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
286 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
287 static void record_biv PARAMS ((struct loop *, struct induction *,
288 rtx, rtx, rtx, rtx, rtx *,
289 int, int));
290 static void check_final_value PARAMS ((const struct loop *,
291 struct induction *));
292 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
293 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
294 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
295 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
296 static void record_giv PARAMS ((const struct loop *, struct induction *,
297 rtx, rtx, rtx, rtx, rtx, rtx, int,
298 enum g_types, int, int, rtx *));
299 static void update_giv_derive PARAMS ((const struct loop *, rtx));
300 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
301 struct loop_info *));
302 static int basic_induction_var PARAMS ((const struct loop *, rtx,
303 enum machine_mode, rtx, rtx,
304 rtx *, rtx *, rtx **));
305 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
306 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
307 rtx *, rtx *, rtx *, int, int *,
308 enum machine_mode));
309 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
310 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
311 static int check_dbra_loop PARAMS ((struct loop *, int));
312 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
313 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
314 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
315 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
316 static int product_cheap_p PARAMS ((rtx, rtx));
317 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
318 int, int, int));
319 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
320 struct iv_class *, int,
321 basic_block, rtx));
322 static int last_use_this_basic_block PARAMS ((rtx, rtx));
323 static void record_initial PARAMS ((rtx, rtx, void *));
324 static void update_reg_last_use PARAMS ((rtx, rtx));
325 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
326 static void loop_regs_scan PARAMS ((const struct loop *, int));
327 static int count_insns_in_loop PARAMS ((const struct loop *));
328 static void load_mems PARAMS ((const struct loop *));
329 static int insert_loop_mem PARAMS ((rtx *, void *));
330 static int replace_loop_mem PARAMS ((rtx *, void *));
331 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
332 static int replace_loop_reg PARAMS ((rtx *, void *));
333 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
334 static void note_reg_stored PARAMS ((rtx, rtx, void *));
335 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
336 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
337 unsigned int));
338 static int replace_label PARAMS ((rtx *, void *));
339 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
340 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
341 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
342 static void loop_regs_update PARAMS ((const struct loop *, rtx));
343 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
345 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
346 rtx, rtx));
347 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
348 basic_block, rtx, rtx));
349 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
350 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
352 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
353 static void loop_delete_insns PARAMS ((rtx, rtx));
354 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
355 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
356 void debug_ivs PARAMS ((const struct loop *));
357 void debug_iv_class PARAMS ((const struct iv_class *));
358 void debug_biv PARAMS ((const struct induction *));
359 void debug_giv PARAMS ((const struct induction *));
360 void debug_loop PARAMS ((const struct loop *));
361 void debug_loops PARAMS ((const struct loops *));
363 typedef struct rtx_pair
365 rtx r1;
366 rtx r2;
367 } rtx_pair;
369 typedef struct loop_replace_args
371 rtx match;
372 rtx replacement;
373 rtx insn;
374 } loop_replace_args;
376 /* Nonzero iff INSN is between START and END, inclusive. */
377 #define INSN_IN_RANGE_P(INSN, START, END) \
378 (INSN_UID (INSN) < max_uid_for_loop \
379 && INSN_LUID (INSN) >= INSN_LUID (START) \
380 && INSN_LUID (INSN) <= INSN_LUID (END))
382 /* Indirect_jump_in_function is computed once per function. */
383 static int indirect_jump_in_function;
384 static int indirect_jump_in_function_p PARAMS ((rtx));
386 static int compute_luids PARAMS ((rtx, rtx, int));
388 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
389 struct induction *,
390 rtx));
392 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
393 copy the value of the strength reduced giv to its original register. */
394 static int copy_cost;
396 /* Cost of using a register, to normalize the benefits of a giv. */
397 static int reg_address_cost;
399 void
400 init_loop ()
402 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
404 reg_address_cost = address_cost (reg, SImode);
406 copy_cost = COSTS_N_INSNS (1);
409 /* Compute the mapping from uids to luids.
410 LUIDs are numbers assigned to insns, like uids,
411 except that luids increase monotonically through the code.
412 Start at insn START and stop just before END. Assign LUIDs
413 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
414 static int
415 compute_luids (start, end, prev_luid)
416 rtx start, end;
417 int prev_luid;
419 int i;
420 rtx insn;
422 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
424 if (INSN_UID (insn) >= max_uid_for_loop)
425 continue;
426 /* Don't assign luids to line-number NOTEs, so that the distance in
427 luids between two insns is not affected by -g. */
428 if (GET_CODE (insn) != NOTE
429 || NOTE_LINE_NUMBER (insn) <= 0)
430 uid_luid[INSN_UID (insn)] = ++i;
431 else
432 /* Give a line number note the same luid as the preceding insn. */
433 uid_luid[INSN_UID (insn)] = i;
435 return i + 1;
438 /* Entry point of this file. Perform loop optimization
439 on the current function. F is the first insn of the function
440 and DUMPFILE is a stream for output of a trace of actions taken
441 (or 0 if none should be output). */
443 void
444 loop_optimize (f, dumpfile, flags)
445 /* f is the first instruction of a chain of insns for one function */
446 rtx f;
447 FILE *dumpfile;
448 int flags;
450 rtx insn;
451 int i;
452 struct loops loops_data;
453 struct loops *loops = &loops_data;
454 struct loop_info *loops_info;
456 loop_dump_stream = dumpfile;
458 init_recog_no_volatile ();
460 max_reg_before_loop = max_reg_num ();
461 loop_max_reg = max_reg_before_loop;
463 regs_may_share = 0;
465 /* Count the number of loops. */
467 max_loop_num = 0;
468 for (insn = f; insn; insn = NEXT_INSN (insn))
470 if (GET_CODE (insn) == NOTE
471 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
472 max_loop_num++;
475 /* Don't waste time if no loops. */
476 if (max_loop_num == 0)
477 return;
479 loops->num = max_loop_num;
481 /* Get size to use for tables indexed by uids.
482 Leave some space for labels allocated by find_and_verify_loops. */
483 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
485 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
486 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
487 sizeof (struct loop *));
489 /* Allocate storage for array of loops. */
490 loops->array = (struct loop *)
491 xcalloc (loops->num, sizeof (struct loop));
493 /* Find and process each loop.
494 First, find them, and record them in order of their beginnings. */
495 find_and_verify_loops (f, loops);
497 /* Allocate and initialize auxiliary loop information. */
498 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
499 for (i = 0; i < (int) loops->num; i++)
500 loops->array[i].aux = loops_info + i;
502 /* Now find all register lifetimes. This must be done after
503 find_and_verify_loops, because it might reorder the insns in the
504 function. */
505 reg_scan (f, max_reg_before_loop, 1);
507 /* This must occur after reg_scan so that registers created by gcse
508 will have entries in the register tables.
510 We could have added a call to reg_scan after gcse_main in toplev.c,
511 but moving this call to init_alias_analysis is more efficient. */
512 init_alias_analysis ();
514 /* See if we went too far. Note that get_max_uid already returns
515 one more than the maximum uid of all insns. */
516 if (get_max_uid () > max_uid_for_loop)
517 abort ();
518 /* Now reset it to the actual size we need. See above. */
519 max_uid_for_loop = get_max_uid ();
521 /* find_and_verify_loops has already called compute_luids, but it
522 might have rearranged code afterwards, so we need to recompute
523 the luids now. */
524 compute_luids (f, NULL_RTX, 0);
526 /* Don't leave gaps in uid_luid for insns that have been
527 deleted. It is possible that the first or last insn
528 using some register has been deleted by cross-jumping.
529 Make sure that uid_luid for that former insn's uid
530 points to the general area where that insn used to be. */
531 for (i = 0; i < max_uid_for_loop; i++)
533 uid_luid[0] = uid_luid[i];
534 if (uid_luid[0] != 0)
535 break;
537 for (i = 0; i < max_uid_for_loop; i++)
538 if (uid_luid[i] == 0)
539 uid_luid[i] = uid_luid[i - 1];
541 /* Determine if the function has an indirect jump. On some systems
542 this prevents low overhead loop instructions from being used. */
543 indirect_jump_in_function = indirect_jump_in_function_p (f);
545 /* Now scan the loops, last ones first, since this means inner ones are done
546 before outer ones. */
547 for (i = max_loop_num - 1; i >= 0; i--)
549 struct loop *loop = &loops->array[i];
551 if (! loop->invalid && loop->end)
552 scan_loop (loop, flags);
555 end_alias_analysis ();
557 /* Clean up. */
558 free (uid_luid);
559 free (uid_loop);
560 free (loops_info);
561 free (loops->array);
564 /* Returns the next insn, in execution order, after INSN. START and
565 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
566 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
567 insn-stream; it is used with loops that are entered near the
568 bottom. */
570 static rtx
571 next_insn_in_loop (loop, insn)
572 const struct loop *loop;
573 rtx insn;
575 insn = NEXT_INSN (insn);
577 if (insn == loop->end)
579 if (loop->top)
580 /* Go to the top of the loop, and continue there. */
581 insn = loop->top;
582 else
583 /* We're done. */
584 insn = NULL_RTX;
587 if (insn == loop->scan_start)
588 /* We're done. */
589 insn = NULL_RTX;
591 return insn;
594 /* Optimize one loop described by LOOP. */
596 /* ??? Could also move memory writes out of loops if the destination address
597 is invariant, the source is invariant, the memory write is not volatile,
598 and if we can prove that no read inside the loop can read this address
599 before the write occurs. If there is a read of this address after the
600 write, then we can also mark the memory read as invariant. */
602 static void
603 scan_loop (loop, flags)
604 struct loop *loop;
605 int flags;
607 struct loop_info *loop_info = LOOP_INFO (loop);
608 struct loop_regs *regs = LOOP_REGS (loop);
609 int i;
610 rtx loop_start = loop->start;
611 rtx loop_end = loop->end;
612 rtx p;
613 /* 1 if we are scanning insns that could be executed zero times. */
614 int maybe_never = 0;
615 /* 1 if we are scanning insns that might never be executed
616 due to a subroutine call which might exit before they are reached. */
617 int call_passed = 0;
618 /* Number of insns in the loop. */
619 int insn_count;
620 int tem;
621 rtx temp, update_start, update_end;
622 /* The SET from an insn, if it is the only SET in the insn. */
623 rtx set, set1;
624 /* Chain describing insns movable in current loop. */
625 struct loop_movables *movables = LOOP_MOVABLES (loop);
626 /* Ratio of extra register life span we can justify
627 for saving an instruction. More if loop doesn't call subroutines
628 since in that case saving an insn makes more difference
629 and more registers are available. */
630 int threshold;
631 /* Nonzero if we are scanning instructions in a sub-loop. */
632 int loop_depth = 0;
633 int in_libcall;
635 loop->top = 0;
637 movables->head = 0;
638 movables->last = 0;
640 /* Determine whether this loop starts with a jump down to a test at
641 the end. This will occur for a small number of loops with a test
642 that is too complex to duplicate in front of the loop.
644 We search for the first insn or label in the loop, skipping NOTEs.
645 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
646 (because we might have a loop executed only once that contains a
647 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
648 (in case we have a degenerate loop).
650 Note that if we mistakenly think that a loop is entered at the top
651 when, in fact, it is entered at the exit test, the only effect will be
652 slightly poorer optimization. Making the opposite error can generate
653 incorrect code. Since very few loops now start with a jump to the
654 exit test, the code here to detect that case is very conservative. */
656 for (p = NEXT_INSN (loop_start);
657 p != loop_end
658 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
659 && (GET_CODE (p) != NOTE
660 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
661 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
662 p = NEXT_INSN (p))
665 loop->scan_start = p;
667 /* If loop end is the end of the current function, then emit a
668 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
669 note insn. This is the position we use when sinking insns out of
670 the loop. */
671 if (NEXT_INSN (loop->end) != 0)
672 loop->sink = NEXT_INSN (loop->end);
673 else
674 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
676 /* Set up variables describing this loop. */
677 prescan_loop (loop);
678 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
680 /* If loop has a jump before the first label,
681 the true entry is the target of that jump.
682 Start scan from there.
683 But record in LOOP->TOP the place where the end-test jumps
684 back to so we can scan that after the end of the loop. */
685 if (GET_CODE (p) == JUMP_INSN
686 /* Loop entry must be unconditional jump (and not a RETURN) */
687 && any_uncondjump_p (p)
688 && JUMP_LABEL (p) != 0
689 /* Check to see whether the jump actually
690 jumps out of the loop (meaning it's no loop).
691 This case can happen for things like
692 do {..} while (0). If this label was generated previously
693 by loop, we can't tell anything about it and have to reject
694 the loop. */
695 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
697 loop->top = next_label (loop->scan_start);
698 loop->scan_start = JUMP_LABEL (p);
701 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
702 as required by loop_reg_used_before_p. So skip such loops. (This
703 test may never be true, but it's best to play it safe.)
705 Also, skip loops where we do not start scanning at a label. This
706 test also rejects loops starting with a JUMP_INSN that failed the
707 test above. */
709 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
710 || GET_CODE (loop->scan_start) != CODE_LABEL)
712 if (loop_dump_stream)
713 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
714 INSN_UID (loop_start), INSN_UID (loop_end));
715 return;
718 /* Allocate extra space for REGs that might be created by load_mems.
719 We allocate a little extra slop as well, in the hopes that we
720 won't have to reallocate the regs array. */
721 loop_regs_scan (loop, loop_info->mems_idx + 16);
722 insn_count = count_insns_in_loop (loop);
724 if (loop_dump_stream)
726 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
727 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
728 if (loop->cont)
729 fprintf (loop_dump_stream, "Continue at insn %d.\n",
730 INSN_UID (loop->cont));
733 /* Scan through the loop finding insns that are safe to move.
734 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
735 this reg will be considered invariant for subsequent insns.
736 We consider whether subsequent insns use the reg
737 in deciding whether it is worth actually moving.
739 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
740 and therefore it is possible that the insns we are scanning
741 would never be executed. At such times, we must make sure
742 that it is safe to execute the insn once instead of zero times.
743 When MAYBE_NEVER is 0, all insns will be executed at least once
744 so that is not a problem. */
746 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
747 p != NULL_RTX;
748 p = next_insn_in_loop (loop, p))
750 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
751 in_libcall--;
752 if (GET_CODE (p) == INSN)
754 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
755 if (temp)
756 in_libcall++;
757 if (! in_libcall
758 && (set = single_set (p))
759 && GET_CODE (SET_DEST (set)) == REG
760 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
761 && SET_DEST (set) != pic_offset_table_rtx
762 #endif
763 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
765 int tem1 = 0;
766 int tem2 = 0;
767 int move_insn = 0;
768 rtx src = SET_SRC (set);
769 rtx dependencies = 0;
771 /* Figure out what to use as a source of this insn. If a
772 REG_EQUIV note is given or if a REG_EQUAL note with a
773 constant operand is specified, use it as the source and
774 mark that we should move this insn by calling
775 emit_move_insn rather than duplicating the insn.
777 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
778 note is present. */
779 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
780 if (temp)
781 src = XEXP (temp, 0), move_insn = 1;
782 else
784 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
785 if (temp && CONSTANT_P (XEXP (temp, 0)))
786 src = XEXP (temp, 0), move_insn = 1;
787 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
789 src = XEXP (temp, 0);
790 /* A libcall block can use regs that don't appear in
791 the equivalent expression. To move the libcall,
792 we must move those regs too. */
793 dependencies = libcall_other_reg (p, src);
797 /* For parallels, add any possible uses to the dependencies, as
798 we can't move the insn without resolving them first. */
799 if (GET_CODE (PATTERN (p)) == PARALLEL)
801 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
803 rtx x = XVECEXP (PATTERN (p), 0, i);
804 if (GET_CODE (x) == USE)
805 dependencies
806 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
807 dependencies);
811 /* Don't try to optimize a register that was made
812 by loop-optimization for an inner loop.
813 We don't know its life-span, so we can't compute
814 the benefit. */
815 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
817 else if (/* The register is used in basic blocks other
818 than the one where it is set (meaning that
819 something after this point in the loop might
820 depend on its value before the set). */
821 ! reg_in_basic_block_p (p, SET_DEST (set))
822 /* And the set is not guaranteed to be executed once
823 the loop starts, or the value before the set is
824 needed before the set occurs...
826 ??? Note we have quadratic behavior here, mitigated
827 by the fact that the previous test will often fail for
828 large loops. Rather than re-scanning the entire loop
829 each time for register usage, we should build tables
830 of the register usage and use them here instead. */
831 && (maybe_never
832 || loop_reg_used_before_p (loop, set, p)))
833 /* It is unsafe to move the set.
835 This code used to consider it OK to move a set of a variable
836 which was not created by the user and not used in an exit
837 test.
838 That behavior is incorrect and was removed. */
840 else if ((tem = loop_invariant_p (loop, src))
841 && (dependencies == 0
842 || (tem2
843 = loop_invariant_p (loop, dependencies)) != 0)
844 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
845 || (tem1
846 = consec_sets_invariant_p
847 (loop, SET_DEST (set),
848 regs->array[REGNO (SET_DEST (set))].set_in_loop,
849 p)))
850 /* If the insn can cause a trap (such as divide by zero),
851 can't move it unless it's guaranteed to be executed
852 once loop is entered. Even a function call might
853 prevent the trap insn from being reached
854 (since it might exit!) */
855 && ! ((maybe_never || call_passed)
856 && may_trap_p (src)))
858 struct movable *m;
859 int regno = REGNO (SET_DEST (set));
861 /* A potential lossage is a case where two insns
862 can be combined as long as they are both in the loop, but
863 we move one of them outside the loop. For large loops,
864 this can lose. The most common case of this is the address
865 of a function being called.
867 Therefore, if this register is marked as being used
868 exactly once if we are in a loop with calls
869 (a "large loop"), see if we can replace the usage of
870 this register with the source of this SET. If we can,
871 delete this insn.
873 Don't do this if P has a REG_RETVAL note or if we have
874 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
876 if (loop_info->has_call
877 && regs->array[regno].single_usage != 0
878 && regs->array[regno].single_usage != const0_rtx
879 && REGNO_FIRST_UID (regno) == INSN_UID (p)
880 && (REGNO_LAST_UID (regno)
881 == INSN_UID (regs->array[regno].single_usage))
882 && regs->array[regno].set_in_loop == 1
883 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
884 && ! side_effects_p (SET_SRC (set))
885 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
886 && (! SMALL_REGISTER_CLASSES
887 || (! (GET_CODE (SET_SRC (set)) == REG
888 && (REGNO (SET_SRC (set))
889 < FIRST_PSEUDO_REGISTER))))
890 /* This test is not redundant; SET_SRC (set) might be
891 a call-clobbered register and the life of REGNO
892 might span a call. */
893 && ! modified_between_p (SET_SRC (set), p,
894 regs->array[regno].single_usage)
895 && no_labels_between_p (p,
896 regs->array[regno].single_usage)
897 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
898 regs->array[regno].single_usage))
900 /* Replace any usage in a REG_EQUAL note. Must copy
901 the new source, so that we don't get rtx sharing
902 between the SET_SOURCE and REG_NOTES of insn p. */
903 REG_NOTES (regs->array[regno].single_usage)
904 = (replace_rtx
905 (REG_NOTES (regs->array[regno].single_usage),
906 SET_DEST (set), copy_rtx (SET_SRC (set))));
908 delete_insn (p);
909 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
910 i++)
911 regs->array[regno+i].set_in_loop = 0;
912 continue;
915 m = (struct movable *) xmalloc (sizeof (struct movable));
916 m->next = 0;
917 m->insn = p;
918 m->set_src = src;
919 m->dependencies = dependencies;
920 m->set_dest = SET_DEST (set);
921 m->force = 0;
922 m->consec
923 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
924 m->done = 0;
925 m->forces = 0;
926 m->partial = 0;
927 m->move_insn = move_insn;
928 m->move_insn_first = 0;
929 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
930 m->savemode = VOIDmode;
931 m->regno = regno;
932 /* Set M->cond if either loop_invariant_p
933 or consec_sets_invariant_p returned 2
934 (only conditionally invariant). */
935 m->cond = ((tem | tem1 | tem2) > 1);
936 m->global = LOOP_REG_GLOBAL_P (loop, regno);
937 m->match = 0;
938 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
939 m->savings = regs->array[regno].n_times_set;
940 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
941 m->savings += libcall_benefit (p);
942 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
943 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
944 /* Add M to the end of the chain MOVABLES. */
945 loop_movables_add (movables, m);
947 if (m->consec > 0)
949 /* It is possible for the first instruction to have a
950 REG_EQUAL note but a non-invariant SET_SRC, so we must
951 remember the status of the first instruction in case
952 the last instruction doesn't have a REG_EQUAL note. */
953 m->move_insn_first = m->move_insn;
955 /* Skip this insn, not checking REG_LIBCALL notes. */
956 p = next_nonnote_insn (p);
957 /* Skip the consecutive insns, if there are any. */
958 p = skip_consec_insns (p, m->consec);
959 /* Back up to the last insn of the consecutive group. */
960 p = prev_nonnote_insn (p);
962 /* We must now reset m->move_insn, m->is_equiv, and
963 possibly m->set_src to correspond to the effects of
964 all the insns. */
965 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
966 if (temp)
967 m->set_src = XEXP (temp, 0), m->move_insn = 1;
968 else
970 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
971 if (temp && CONSTANT_P (XEXP (temp, 0)))
972 m->set_src = XEXP (temp, 0), m->move_insn = 1;
973 else
974 m->move_insn = 0;
977 m->is_equiv
978 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
981 /* If this register is always set within a STRICT_LOW_PART
982 or set to zero, then its high bytes are constant.
983 So clear them outside the loop and within the loop
984 just load the low bytes.
985 We must check that the machine has an instruction to do so.
986 Also, if the value loaded into the register
987 depends on the same register, this cannot be done. */
988 else if (SET_SRC (set) == const0_rtx
989 && GET_CODE (NEXT_INSN (p)) == INSN
990 && (set1 = single_set (NEXT_INSN (p)))
991 && GET_CODE (set1) == SET
992 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
993 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
994 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
995 == SET_DEST (set))
996 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
998 int regno = REGNO (SET_DEST (set));
999 if (regs->array[regno].set_in_loop == 2)
1001 struct movable *m;
1002 m = (struct movable *) xmalloc (sizeof (struct movable));
1003 m->next = 0;
1004 m->insn = p;
1005 m->set_dest = SET_DEST (set);
1006 m->dependencies = 0;
1007 m->force = 0;
1008 m->consec = 0;
1009 m->done = 0;
1010 m->forces = 0;
1011 m->move_insn = 0;
1012 m->move_insn_first = 0;
1013 m->partial = 1;
1014 /* If the insn may not be executed on some cycles,
1015 we can't clear the whole reg; clear just high part.
1016 Not even if the reg is used only within this loop.
1017 Consider this:
1018 while (1)
1019 while (s != t) {
1020 if (foo ()) x = *s;
1021 use (x);
1023 Clearing x before the inner loop could clobber a value
1024 being saved from the last time around the outer loop.
1025 However, if the reg is not used outside this loop
1026 and all uses of the register are in the same
1027 basic block as the store, there is no problem.
1029 If this insn was made by loop, we don't know its
1030 INSN_LUID and hence must make a conservative
1031 assumption. */
1032 m->global = (INSN_UID (p) >= max_uid_for_loop
1033 || LOOP_REG_GLOBAL_P (loop, regno)
1034 || (labels_in_range_p
1035 (p, REGNO_FIRST_LUID (regno))));
1036 if (maybe_never && m->global)
1037 m->savemode = GET_MODE (SET_SRC (set1));
1038 else
1039 m->savemode = VOIDmode;
1040 m->regno = regno;
1041 m->cond = 0;
1042 m->match = 0;
1043 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1044 m->savings = 1;
1045 for (i = 0;
1046 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1047 i++)
1048 regs->array[regno+i].set_in_loop = -1;
1049 /* Add M to the end of the chain MOVABLES. */
1050 loop_movables_add (movables, m);
1055 /* Past a call insn, we get to insns which might not be executed
1056 because the call might exit. This matters for insns that trap.
1057 Constant and pure call insns always return, so they don't count. */
1058 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1059 call_passed = 1;
1060 /* Past a label or a jump, we get to insns for which we
1061 can't count on whether or how many times they will be
1062 executed during each iteration. Therefore, we can
1063 only move out sets of trivial variables
1064 (those not used after the loop). */
1065 /* Similar code appears twice in strength_reduce. */
1066 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1067 /* If we enter the loop in the middle, and scan around to the
1068 beginning, don't set maybe_never for that. This must be an
1069 unconditional jump, otherwise the code at the top of the
1070 loop might never be executed. Unconditional jumps are
1071 followed by a barrier then the loop_end. */
1072 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1073 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1074 && any_uncondjump_p (p)))
1075 maybe_never = 1;
1076 else if (GET_CODE (p) == NOTE)
1078 /* At the virtual top of a converted loop, insns are again known to
1079 be executed: logically, the loop begins here even though the exit
1080 code has been duplicated. */
1081 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1082 maybe_never = call_passed = 0;
1083 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1084 loop_depth++;
1085 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1086 loop_depth--;
1090 /* If one movable subsumes another, ignore that other. */
1092 ignore_some_movables (movables);
1094 /* For each movable insn, see if the reg that it loads
1095 leads right into another conditionally movable insn when it dies.
1096 If so, record that the second insn "forces" the first one,
1097 since the second can be moved only if the first is. */
1099 force_movables (movables);
1101 /* See if there are multiple movable insns that load the same value.
1102 If there are, make all but the first point at the first one
1103 through the `match' field, and add the priorities of them
1104 all together as the priority of the first. */
1106 combine_movables (movables, regs);
1108 /* Now consider each movable insn to decide whether it is worth moving.
1109 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1111 For machines with few registers this increases code size, so do not
1112 move movables when optimizing for code size on such machines.
1113 (The 18 below is the value for i386.) */
1115 if (!optimize_size
1116 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1118 move_movables (loop, movables, threshold, insn_count);
1120 /* Recalculate regs->array if move_movables has created new
1121 registers. */
1122 if (max_reg_num () > regs->num)
1124 loop_regs_scan (loop, 0);
1125 for (update_start = loop_start;
1126 PREV_INSN (update_start)
1127 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1128 update_start = PREV_INSN (update_start))
1130 update_end = NEXT_INSN (loop_end);
1132 reg_scan_update (update_start, update_end, loop_max_reg);
1133 loop_max_reg = max_reg_num ();
1137 /* Now candidates that still are negative are those not moved.
1138 Change regs->array[I].set_in_loop to indicate that those are not actually
1139 invariant. */
1140 for (i = 0; i < regs->num; i++)
1141 if (regs->array[i].set_in_loop < 0)
1142 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1144 /* Now that we've moved some things out of the loop, we might be able to
1145 hoist even more memory references. */
1146 load_mems (loop);
1148 /* Recalculate regs->array if load_mems has created new registers. */
1149 if (max_reg_num () > regs->num)
1150 loop_regs_scan (loop, 0);
1152 for (update_start = loop_start;
1153 PREV_INSN (update_start)
1154 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1155 update_start = PREV_INSN (update_start))
1157 update_end = NEXT_INSN (loop_end);
1159 reg_scan_update (update_start, update_end, loop_max_reg);
1160 loop_max_reg = max_reg_num ();
1162 if (flag_strength_reduce)
1164 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1165 /* Ensure our label doesn't go away. */
1166 LABEL_NUSES (update_end)++;
1168 strength_reduce (loop, flags);
1170 reg_scan_update (update_start, update_end, loop_max_reg);
1171 loop_max_reg = max_reg_num ();
1173 if (update_end && GET_CODE (update_end) == CODE_LABEL
1174 && --LABEL_NUSES (update_end) == 0)
1175 delete_related_insns (update_end);
1179 /* The movable information was required by strength reduction above, so it can only be freed now. */
1180 loop_movables_free (movables);
1182 free (regs->array);
1183 regs->array = 0;
1184 regs->num = 0;
1187 /* Add elements to *OUTPUT to record all the pseudo-regs
1188 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1190 void
1191 record_excess_regs (in_this, not_in_this, output)
1192 rtx in_this, not_in_this;
1193 rtx *output;
1195 enum rtx_code code;
1196 const char *fmt;
1197 int i;
1199 code = GET_CODE (in_this);
1201 switch (code)
1203 case PC:
1204 case CC0:
1205 case CONST_INT:
1206 case CONST_DOUBLE:
1207 case CONST:
1208 case SYMBOL_REF:
1209 case LABEL_REF:
1210 return;
1212 case REG:
1213 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1214 && ! reg_mentioned_p (in_this, not_in_this))
1215 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1216 return;
1218 default:
1219 break;
1222 fmt = GET_RTX_FORMAT (code);
1223 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1225 int j;
1227 switch (fmt[i])
1229 case 'E':
1230 for (j = 0; j < XVECLEN (in_this, i); j++)
1231 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1232 break;
1234 case 'e':
1235 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1236 break;
1241 /* Check what regs are referred to in the libcall block ending with INSN,
1242 aside from those mentioned in the equivalent value.
1243 If there are none, return 0.
1244 If there are one or more, return an EXPR_LIST containing all of them. */
1246 rtx
1247 libcall_other_reg (insn, equiv)
1248 rtx insn, equiv;
1250 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1251 rtx p = XEXP (note, 0);
1252 rtx output = 0;
1254 /* First, find all the regs used in the libcall block
1255 that are not mentioned as inputs to the result. */
1257 while (p != insn)
1259 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1260 || GET_CODE (p) == CALL_INSN)
1261 record_excess_regs (PATTERN (p), equiv, &output);
1262 p = NEXT_INSN (p);
1265 return output;
1268 /* Return 1 if all uses of REG
1269 are between INSN and the end of the basic block. */
1271 static int
1272 reg_in_basic_block_p (insn, reg)
1273 rtx insn, reg;
1275 int regno = REGNO (reg);
1276 rtx p;
1278 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1279 return 0;
1281 /* Search this basic block for the already recorded last use of the reg. */
1282 for (p = insn; p; p = NEXT_INSN (p))
1284 switch (GET_CODE (p))
1286 case NOTE:
1287 break;
1289 case INSN:
1290 case CALL_INSN:
1291 /* Ordinary insn: if this is the last use, we win. */
1292 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1293 return 1;
1294 break;
1296 case JUMP_INSN:
1297 /* Jump insn: if this is the last use, we win. */
1298 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1299 return 1;
1300 /* Otherwise, it's the end of the basic block, so we lose. */
1301 return 0;
1303 case CODE_LABEL:
1304 case BARRIER:
1305 /* It's the end of the basic block, so we lose. */
1306 return 0;
1308 default:
1309 break;
1313 /* The "last use" that was recorded can't be found after the first
1314 use. This can happen when the last use was deleted while
1315 processing an inner loop, this inner loop was then completely
1316 unrolled, and the outer loop is always exited after the inner loop,
1317 so that everything after the first use becomes a single basic block. */
1318 return 1;
1321 /* Compute the benefit of eliminating the insns in the block whose
1322 last insn is LAST. This may be a group of insns used to compute a
1323 value directly or can contain a library call. */
1325 static int
1326 libcall_benefit (last)
1327 rtx last;
1329 rtx insn;
1330 int benefit = 0;
1332 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1333 insn != last; insn = NEXT_INSN (insn))
1335 if (GET_CODE (insn) == CALL_INSN)
1336 benefit += 10; /* Assume at least this many insns in a library
1337 routine. */
1338 else if (GET_CODE (insn) == INSN
1339 && GET_CODE (PATTERN (insn)) != USE
1340 && GET_CODE (PATTERN (insn)) != CLOBBER)
1341 benefit++;
1344 return benefit;
1347 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1349 static rtx
1350 skip_consec_insns (insn, count)
1351 rtx insn;
1352 int count;
1354 for (; count > 0; count--)
1356 rtx temp;
1358 /* If first insn of libcall sequence, skip to end. */
1359 /* Do this at start of loop, since INSN is guaranteed to
1360 be an insn here. */
1361 if (GET_CODE (insn) != NOTE
1362 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1363 insn = XEXP (temp, 0);
1366 insn = NEXT_INSN (insn);
1367 while (GET_CODE (insn) == NOTE);
1370 return insn;
1373 /* Ignore any movable whose insn falls within a libcall
1374 which is part of another movable.
1375 We make use of the fact that the movable for the libcall value
1376 was made later and so appears later on the chain. */
1378 static void
1379 ignore_some_movables (movables)
1380 struct loop_movables *movables;
1382 struct movable *m, *m1;
1384 for (m = movables->head; m; m = m->next)
1386 /* Is this a movable for the value of a libcall? */
1387 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1388 if (note)
1390 rtx insn;
1391 /* Check for earlier movables inside that range,
1392 and mark them invalid. We cannot use LUIDs here because
1393 insns created by loop.c for prior loops don't have LUIDs.
1394 Rather than reject all such insns from movables, we just
1395 explicitly check each insn in the libcall (since invariant
1396 libcalls aren't that common). */
1397 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1398 for (m1 = movables->head; m1 != m; m1 = m1->next)
1399 if (m1->insn == insn)
1400 m1->done = 1;
1405 /* For each movable insn, see if the reg that it loads
1406 leads right into another conditionally movable insn when it dies.
1407 If so, record that the second insn "forces" the first one,
1408 since the second can be moved only if the first is. */
1410 static void
1411 force_movables (movables)
1412 struct loop_movables *movables;
1414 struct movable *m, *m1;
1416 for (m1 = movables->head; m1; m1 = m1->next)
1417 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1418 if (!m1->partial && !m1->done)
1420 int regno = m1->regno;
1421 for (m = m1->next; m; m = m->next)
1422 /* ??? Could this be a bug? What if CSE caused the
1423 register of M1 to be used after this insn?
1424 Since CSE does not update regno_last_uid,
1425 this insn M->insn might not be where it dies.
1426 But very likely this doesn't matter; what matters is
1427 that M's reg is computed from M1's reg. */
1428 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1429 && !m->done)
1430 break;
1431 if (m != 0 && m->set_src == m1->set_dest
1432 /* If m->consec, m->set_src isn't valid. */
1433 && m->consec == 0)
1434 m = 0;
1436 /* Increase the priority of moving the first insn
1437 since it permits the second to be moved as well. */
1438 if (m != 0)
1440 m->forces = m1;
1441 m1->lifetime += m->lifetime;
1442 m1->savings += m->savings;
1447 /* Find invariant expressions that are equal and can be combined into
1448 one register. */
1450 static void
1451 combine_movables (movables, regs)
1452 struct loop_movables *movables;
1453 struct loop_regs *regs;
1455 struct movable *m;
1456 char *matched_regs = (char *) xmalloc (regs->num);
1457 enum machine_mode mode;
1459 /* Regs that are set more than once are not allowed to match
1460 or be matched. I'm no longer sure why not. */
1461 /* Only pseudo registers are allowed to match or be matched,
1462 since move_movables does not validate the change. */
1463 /* Perhaps testing m->consec_sets would be more appropriate here? */
1465 for (m = movables->head; m; m = m->next)
1466 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1467 && m->regno >= FIRST_PSEUDO_REGISTER
1468 && !m->partial)
1470 struct movable *m1;
1471 int regno = m->regno;
1473 memset (matched_regs, 0, regs->num);
1474 matched_regs[regno] = 1;
1476 /* We want later insns to match the first one. Don't make the first
1477 one match any later ones. So start this loop at m->next. */
1478 for (m1 = m->next; m1; m1 = m1->next)
1479 if (m != m1 && m1->match == 0
1480 && regs->array[m1->regno].n_times_set == 1
1481 && m1->regno >= FIRST_PSEUDO_REGISTER
1482 /* A reg used outside the loop mustn't be eliminated. */
1483 && !m1->global
1484 /* A reg used for zero-extending mustn't be eliminated. */
1485 && !m1->partial
1486 && (matched_regs[m1->regno]
1489 /* Can combine regs with different modes loaded from the
1490 same constant only if the modes are the same or
1491 if both are integer modes with M wider or the same
1492 width as M1. The check for integer is redundant, but
1493 safe, since the only case of differing destination
1494 modes with equal sources is when both sources are
1495 VOIDmode, i.e., CONST_INT. */
1496 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1497 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1498 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1499 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1500 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1501 /* See if the source of M1 says it matches M. */
1502 && ((GET_CODE (m1->set_src) == REG
1503 && matched_regs[REGNO (m1->set_src)])
1504 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1505 movables, regs))))
1506 && ((m->dependencies == m1->dependencies)
1507 || rtx_equal_p (m->dependencies, m1->dependencies)))
1509 m->lifetime += m1->lifetime;
1510 m->savings += m1->savings;
1511 m1->done = 1;
1512 m1->match = m;
1513 matched_regs[m1->regno] = 1;
1517 /* Now combine the regs used for zero-extension.
1518 This can be done for those not marked `global'
1519 provided their lives don't overlap. */
1521 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1522 mode = GET_MODE_WIDER_MODE (mode))
1524 struct movable *m0 = 0;
1526 /* Combine all the registers for extension from mode MODE.
1527 Don't combine any that are used outside this loop. */
1528 for (m = movables->head; m; m = m->next)
1529 if (m->partial && ! m->global
1530 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1532 struct movable *m1;
1534 int first = REGNO_FIRST_LUID (m->regno);
1535 int last = REGNO_LAST_LUID (m->regno);
1537 if (m0 == 0)
1539 /* First one: don't check for overlap, just record it. */
1540 m0 = m;
1541 continue;
1544 /* Make sure they extend to the same mode.
1545 (Almost always true.) */
1546 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1547 continue;
1549 /* We already have one: check for overlap with those
1550 already combined together. */
1551 for (m1 = movables->head; m1 != m; m1 = m1->next)
1552 if (m1 == m0 || (m1->partial && m1->match == m0))
1553 if (! (REGNO_FIRST_LUID (m1->regno) > last
1554 || REGNO_LAST_LUID (m1->regno) < first))
1555 goto overlap;
1557 /* No overlap: we can combine this with the others. */
1558 m0->lifetime += m->lifetime;
1559 m0->savings += m->savings;
1560 m->done = 1;
1561 m->match = m0;
1563 overlap:
1568 /* Clean up. */
1569 free (matched_regs);
1572 /* Returns the number of movable instructions in LOOP that were not
1573 moved outside the loop. */
1575 static int
1576 num_unmoved_movables (loop)
1577 const struct loop *loop;
1579 int num = 0;
1580 struct movable *m;
1582 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1583 if (!m->done)
1584 ++num;
1586 return num;
1590 /* Return 1 if regs X and Y will become the same if moved. */
1592 static int
1593 regs_match_p (x, y, movables)
1594 rtx x, y;
1595 struct loop_movables *movables;
1597 unsigned int xn = REGNO (x);
1598 unsigned int yn = REGNO (y);
1599 struct movable *mx, *my;
1601 for (mx = movables->head; mx; mx = mx->next)
1602 if (mx->regno == xn)
1603 break;
1605 for (my = movables->head; my; my = my->next)
1606 if (my->regno == yn)
1607 break;
1609 return (mx && my
1610 && ((mx->match == my->match && mx->match != 0)
1611 || mx->match == my
1612 || mx == my->match));
1615 /* Return 1 if X and Y are identical-looking rtx's.
1616 This is the Lisp function EQUAL for rtx arguments.
1618 If two registers are matching movables or a movable register and an
1619 equivalent constant, consider them equal. */
1621 static int
1622 rtx_equal_for_loop_p (x, y, movables, regs)
1623 rtx x, y;
1624 struct loop_movables *movables;
1625 struct loop_regs *regs;
1627 int i;
1628 int j;
1629 struct movable *m;
1630 enum rtx_code code;
1631 const char *fmt;
1633 if (x == y)
1634 return 1;
1635 if (x == 0 || y == 0)
1636 return 0;
1638 code = GET_CODE (x);
1640 /* If we have a register and a constant, they may sometimes be
1641 equal. */
1642 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1643 && CONSTANT_P (y))
1645 for (m = movables->head; m; m = m->next)
1646 if (m->move_insn && m->regno == REGNO (x)
1647 && rtx_equal_p (m->set_src, y))
1648 return 1;
1650 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1651 && CONSTANT_P (x))
1653 for (m = movables->head; m; m = m->next)
1654 if (m->move_insn && m->regno == REGNO (y)
1655 && rtx_equal_p (m->set_src, x))
1656 return 1;
1659 /* Otherwise, rtx's of different codes cannot be equal. */
1660 if (code != GET_CODE (y))
1661 return 0;
1663 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1664 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1666 if (GET_MODE (x) != GET_MODE (y))
1667 return 0;
1669 /* These three types of rtx's can be compared nonrecursively. */
1670 if (code == REG)
1671 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1673 if (code == LABEL_REF)
1674 return XEXP (x, 0) == XEXP (y, 0);
1675 if (code == SYMBOL_REF)
1676 return XSTR (x, 0) == XSTR (y, 0);
1678 /* Compare the elements. If any pair of corresponding elements
1679 fails to match, return 0 for the whole thing. */
1681 fmt = GET_RTX_FORMAT (code);
1682 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1684 switch (fmt[i])
1686 case 'w':
1687 if (XWINT (x, i) != XWINT (y, i))
1688 return 0;
1689 break;
1691 case 'i':
1692 if (XINT (x, i) != XINT (y, i))
1693 return 0;
1694 break;
1696 case 'E':
1697 /* Two vectors must have the same length. */
1698 if (XVECLEN (x, i) != XVECLEN (y, i))
1699 return 0;
1701 /* And the corresponding elements must match. */
1702 for (j = 0; j < XVECLEN (x, i); j++)
1703 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1704 movables, regs) == 0)
1705 return 0;
1706 break;
1708 case 'e':
1709 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1710 == 0)
1711 return 0;
1712 break;
1714 case 's':
1715 if (strcmp (XSTR (x, i), XSTR (y, i)))
1716 return 0;
1717 break;
1719 case 'u':
1720 /* These are just backpointers, so they don't matter. */
1721 break;
1723 case '0':
1724 break;
1726 /* It is believed that rtx's at this level will never
1727 contain anything but integers and other rtx's,
1728 except for within LABEL_REFs and SYMBOL_REFs. */
1729 default:
1730 abort ();
1733 return 1;
1736 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1737 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1738 references is incremented once for each added note. */
1740 static void
1741 add_label_notes (x, insns)
1742 rtx x;
1743 rtx insns;
1745 enum rtx_code code = GET_CODE (x);
1746 int i, j;
1747 const char *fmt;
1748 rtx insn;
1750 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1752 /* This code used to ignore labels that referred to dispatch tables to
1753 avoid flow generating (slightly) worse code.
1755 We no longer ignore such label references (see LABEL_REF handling in
1756 mark_jump_label for additional information). */
1757 for (insn = insns; insn; insn = NEXT_INSN (insn))
1758 if (reg_mentioned_p (XEXP (x, 0), insn))
1760 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1761 REG_NOTES (insn));
1762 if (LABEL_P (XEXP (x, 0)))
1763 LABEL_NUSES (XEXP (x, 0))++;
1767 fmt = GET_RTX_FORMAT (code);
1768 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1770 if (fmt[i] == 'e')
1771 add_label_notes (XEXP (x, i), insns);
1772 else if (fmt[i] == 'E')
1773 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1774 add_label_notes (XVECEXP (x, i, j), insns);
1778 /* Scan MOVABLES, and move the insns that deserve to be moved.
1779 If two matching movables are combined, replace one reg with the
1780 other throughout. */
1782 static void
1783 move_movables (loop, movables, threshold, insn_count)
1784 struct loop *loop;
1785 struct loop_movables *movables;
1786 int threshold;
1787 int insn_count;
1789 struct loop_regs *regs = LOOP_REGS (loop);
1790 int nregs = regs->num;
1791 rtx new_start = 0;
1792 struct movable *m;
1793 rtx p;
1794 rtx loop_start = loop->start;
1795 rtx loop_end = loop->end;
1796 /* Map of pseudo-register replacements to handle combining
1797 when we move several insns that load the same value
1798 into different pseudo-registers. */
1799 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1800 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1802 for (m = movables->head; m; m = m->next)
1804 /* Describe this movable insn. */
1806 if (loop_dump_stream)
1808 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1809 INSN_UID (m->insn), m->regno, m->lifetime);
1810 if (m->consec > 0)
1811 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1812 if (m->cond)
1813 fprintf (loop_dump_stream, "cond ");
1814 if (m->force)
1815 fprintf (loop_dump_stream, "force ");
1816 if (m->global)
1817 fprintf (loop_dump_stream, "global ");
1818 if (m->done)
1819 fprintf (loop_dump_stream, "done ");
1820 if (m->move_insn)
1821 fprintf (loop_dump_stream, "move-insn ");
1822 if (m->match)
1823 fprintf (loop_dump_stream, "matches %d ",
1824 INSN_UID (m->match->insn));
1825 if (m->forces)
1826 fprintf (loop_dump_stream, "forces %d ",
1827 INSN_UID (m->forces->insn));
1830 /* Ignore the insn if it's already done (it matched something else).
1831 Otherwise, see if it is now safe to move. */
1833 if (!m->done
1834 && (! m->cond
1835 || (1 == loop_invariant_p (loop, m->set_src)
1836 && (m->dependencies == 0
1837 || 1 == loop_invariant_p (loop, m->dependencies))
1838 && (m->consec == 0
1839 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1840 m->consec + 1,
1841 m->insn))))
1842 && (! m->forces || m->forces->done))
1844 int regno;
1845 rtx p;
1846 int savings = m->savings;
1848 /* We have an insn that is safe to move.
1849 Compute its desirability. */
1851 p = m->insn;
1852 regno = m->regno;
1854 if (loop_dump_stream)
1855 fprintf (loop_dump_stream, "savings %d ", savings);
1857 if (regs->array[regno].moved_once && loop_dump_stream)
1858 fprintf (loop_dump_stream, "halved since already moved ");
1860 /* An insn MUST be moved if we already moved something else
1861 which is safe only if this one is moved too: that is,
1862 if already_moved[REGNO] is nonzero. */
1864 /* An insn is desirable to move if the new lifetime of the
1865 register is no more than THRESHOLD times the old lifetime.
1866 If it's not desirable, it means the loop is so big
1867 that moving won't speed things up much,
1868 and it is liable to make register usage worse. */
1870 /* It is also desirable to move if it can be moved at no
1871 extra cost because something else was already moved. */
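/* A worked instance of the test below, with made-up numbers: if
   THRESHOLD is 6, SAVINGS is 2, the register's lifetime is 10 insns
   and INSN_COUNT is 100, then 6 * 2 * 10 = 120 >= 100 and the insn
   is moved; had the register already been moved out of another loop,
   the right-hand side would double to 200 and the move would be
   skipped unless some other condition forces it.  */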
1873 if (already_moved[regno]
1874 || flag_move_all_movables
1875 || (threshold * savings * m->lifetime) >=
1876 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1877 || (m->forces && m->forces->done
1878 && regs->array[m->forces->regno].n_times_set == 1))
1880 int count;
1881 struct movable *m1;
1882 rtx first = NULL_RTX;
1884 /* Now move the insns that set the reg. */
1886 if (m->partial && m->match)
1888 rtx newpat, i1;
1889 rtx r1, r2;
1890 /* Find the end of this chain of matching regs.
1891 Thus, we load each reg in the chain from that one reg.
1892 And that reg is loaded with 0 directly,
1893 since it has ->match == 0. */
1894 for (m1 = m; m1->match; m1 = m1->match);
1895 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1896 SET_DEST (PATTERN (m1->insn)));
1897 i1 = loop_insn_hoist (loop, newpat);
1899 /* Mark the moved, invariant reg as being allowed to
1900 share a hard reg with the other matching invariant. */
1901 REG_NOTES (i1) = REG_NOTES (m->insn);
1902 r1 = SET_DEST (PATTERN (m->insn));
1903 r2 = SET_DEST (PATTERN (m1->insn));
1904 regs_may_share
1905 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1906 gen_rtx_EXPR_LIST (VOIDmode, r2,
1907 regs_may_share));
1908 delete_insn (m->insn);
1910 if (new_start == 0)
1911 new_start = i1;
1913 if (loop_dump_stream)
1914 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1916 /* If we are to re-generate the item being moved with a
1917 new move insn, first delete what we have and then emit
1918 the move insn before the loop. */
1919 else if (m->move_insn)
1921 rtx i1, temp, seq;
1923 for (count = m->consec; count >= 0; count--)
1925 /* If this is the first insn of a library call sequence,
1926 something is very wrong. */
1927 if (GET_CODE (p) != NOTE
1928 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1929 abort ();
1931 /* If this is the last insn of a libcall sequence, then
1932 delete every insn in the sequence except the last.
1933 The last insn is handled in the normal manner. */
1934 if (GET_CODE (p) != NOTE
1935 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1937 temp = XEXP (temp, 0);
1938 while (temp != p)
1939 temp = delete_insn (temp);
1942 temp = p;
1943 p = delete_insn (p);
1945 /* simplify_giv_expr expects that it can walk the insns
1946 at m->insn forwards and see this old sequence we are
1947 tossing here. delete_insn does preserve the next
1948 pointers, but when we skip over a NOTE we must fix
1949 it up. Otherwise that code walks into the non-deleted
1950 insn stream. */
1951 while (p && GET_CODE (p) == NOTE)
1952 p = NEXT_INSN (temp) = NEXT_INSN (p);
1955 start_sequence ();
1956 emit_move_insn (m->set_dest, m->set_src);
1957 seq = get_insns ();
1958 end_sequence ();
1960 add_label_notes (m->set_src, seq);
1962 i1 = loop_insn_hoist (loop, seq);
1963 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1964 set_unique_reg_note (i1,
1965 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1966 m->set_src);
1968 if (loop_dump_stream)
1969 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1971 /* The more regs we move, the less we like moving them. */
1972 threshold -= 3;
1974 else
1976 for (count = m->consec; count >= 0; count--)
1978 rtx i1, temp;
1980 /* If first insn of libcall sequence, skip to end. */
1981 /* Do this at start of loop, since p is guaranteed to
1982 be an insn here. */
1983 if (GET_CODE (p) != NOTE
1984 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1985 p = XEXP (temp, 0);
1987 /* If last insn of libcall sequence, move all
1988 insns except the last before the loop. The last
1989 insn is handled in the normal manner. */
1990 if (GET_CODE (p) != NOTE
1991 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1993 rtx fn_address = 0;
1994 rtx fn_reg = 0;
1995 rtx fn_address_insn = 0;
1997 first = 0;
1998 for (temp = XEXP (temp, 0); temp != p;
1999 temp = NEXT_INSN (temp))
2001 rtx body;
2002 rtx n;
2003 rtx next;
2005 if (GET_CODE (temp) == NOTE)
2006 continue;
2008 body = PATTERN (temp);
2010 /* Find the next insn after TEMP,
2011 not counting USE or NOTE insns. */
2012 for (next = NEXT_INSN (temp); next != p;
2013 next = NEXT_INSN (next))
2014 if (! (GET_CODE (next) == INSN
2015 && GET_CODE (PATTERN (next)) == USE)
2016 && GET_CODE (next) != NOTE)
2017 break;
2019 /* If that is the call, this may be the insn
2020 that loads the function address.
2022 Extract the function address from the insn
2023 that loads it into a register.
2024 If this insn was cse'd, we get incorrect code.
2026 So emit a new move insn that copies the
2027 function address into the register that the
2028 call insn will use. flow.c will delete any
2029 redundant stores that we have created. */
2030 if (GET_CODE (next) == CALL_INSN
2031 && GET_CODE (body) == SET
2032 && GET_CODE (SET_DEST (body)) == REG
2033 && (n = find_reg_note (temp, REG_EQUAL,
2034 NULL_RTX)))
2036 fn_reg = SET_SRC (body);
2037 if (GET_CODE (fn_reg) != REG)
2038 fn_reg = SET_DEST (body);
2039 fn_address = XEXP (n, 0);
2040 fn_address_insn = temp;
2042 /* We have the call insn.
2043 If it uses the register we suspect it might,
2044 load it with the correct address directly. */
2045 if (GET_CODE (temp) == CALL_INSN
2046 && fn_address != 0
2047 && reg_referenced_p (fn_reg, body))
2048 loop_insn_emit_after (loop, 0, fn_address_insn,
2049 gen_move_insn
2050 (fn_reg, fn_address));
2052 if (GET_CODE (temp) == CALL_INSN)
2054 i1 = loop_call_insn_hoist (loop, body);
2055 /* Because the USAGE information potentially
2056 contains objects other than hard registers
2057 we need to copy it. */
2058 if (CALL_INSN_FUNCTION_USAGE (temp))
2059 CALL_INSN_FUNCTION_USAGE (i1)
2060 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2062 else
2063 i1 = loop_insn_hoist (loop, body);
2064 if (first == 0)
2065 first = i1;
2066 if (temp == fn_address_insn)
2067 fn_address_insn = i1;
2068 REG_NOTES (i1) = REG_NOTES (temp);
2069 REG_NOTES (temp) = NULL;
2070 delete_insn (temp);
2072 if (new_start == 0)
2073 new_start = first;
2075 if (m->savemode != VOIDmode)
2077 /* P sets REG to zero; but we should clear only
2078 the bits that are not covered by the mode
2079 m->savemode. */
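/* For instance, if m->savemode is QImode the mask computed below is
   (1 << 8) - 1 = 0xff, so the AND clears every bit above the low
   byte while leaving the low byte itself untouched.  */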
2080 rtx reg = m->set_dest;
2081 rtx sequence;
2082 rtx tem;
2084 start_sequence ();
2085 tem = expand_simple_binop
2086 (GET_MODE (reg), AND, reg,
2087 GEN_INT ((((HOST_WIDE_INT) 1
2088 << GET_MODE_BITSIZE (m->savemode)))
2089 - 1),
2090 reg, 1, OPTAB_LIB_WIDEN);
2091 if (tem == 0)
2092 abort ();
2093 if (tem != reg)
2094 emit_move_insn (reg, tem);
2095 sequence = get_insns ();
2096 end_sequence ();
2097 i1 = loop_insn_hoist (loop, sequence);
2099 else if (GET_CODE (p) == CALL_INSN)
2101 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2102 /* Because the USAGE information potentially
2103 contains objects other than hard registers
2104 we need to copy it. */
2105 if (CALL_INSN_FUNCTION_USAGE (p))
2106 CALL_INSN_FUNCTION_USAGE (i1)
2107 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2109 else if (count == m->consec && m->move_insn_first)
2111 rtx seq;
2112 /* The SET_SRC might not be invariant, so we must
2113 use the REG_EQUAL note. */
2114 start_sequence ();
2115 emit_move_insn (m->set_dest, m->set_src);
2116 seq = get_insns ();
2117 end_sequence ();
2119 add_label_notes (m->set_src, seq);
2121 i1 = loop_insn_hoist (loop, seq);
2122 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2123 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2124 : REG_EQUAL, m->set_src);
2126 else
2127 i1 = loop_insn_hoist (loop, PATTERN (p));
2129 if (REG_NOTES (i1) == 0)
2131 REG_NOTES (i1) = REG_NOTES (p);
2132 REG_NOTES (p) = NULL;
2134 /* If there is a REG_EQUAL note present whose value
2135 is not loop invariant, then delete it, since it
2136 may cause problems with later optimization passes.
2137 It is possible for cse to create such notes
2138 as a result of record_jump_cond.
2140 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2141 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2142 remove_note (i1, temp);
2145 if (new_start == 0)
2146 new_start = i1;
2148 if (loop_dump_stream)
2149 fprintf (loop_dump_stream, " moved to %d",
2150 INSN_UID (i1));
2152 /* If library call, now fix the REG_NOTES that contain
2153 insn pointers, namely REG_LIBCALL on FIRST
2154 and REG_RETVAL on I1. */
2155 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2157 XEXP (temp, 0) = first;
2158 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2159 XEXP (temp, 0) = i1;
2162 temp = p;
2163 delete_insn (p);
2164 p = NEXT_INSN (p);
2166 /* simplify_giv_expr expects that it can walk the insns
2167 at m->insn forwards and see this old sequence we are
2168 tossing here. delete_insn does preserve the next
2169 pointers, but when we skip over a NOTE we must fix
2170 it up. Otherwise that code walks into the non-deleted
2171 insn stream. */
2172 while (p && GET_CODE (p) == NOTE)
2173 p = NEXT_INSN (temp) = NEXT_INSN (p);
2176 /* The more regs we move, the less we like moving them. */
2177 threshold -= 3;
2180 /* Any other movable that loads the same register
2181 MUST be moved. */
2182 already_moved[regno] = 1;
2184 /* This reg has been moved out of one loop. */
2185 regs->array[regno].moved_once = 1;
2187 /* The reg set here is now invariant. */
2188 if (! m->partial)
2190 int i;
2191 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2192 regs->array[regno+i].set_in_loop = 0;
2195 m->done = 1;
2197 /* Change the length-of-life info for the register
2198 to say it lives at least the full length of this loop.
2199 This will help guide optimizations in outer loops. */
2201 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2202 /* This is the old insn before all the moved insns.
2203 We can't use the moved insn because it is out of range
2204 in uid_luid. Only the old insns have luids. */
2205 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2206 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2207 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2209 /* Combine with this moved insn any other matching movables. */
2211 if (! m->partial)
2212 for (m1 = movables->head; m1; m1 = m1->next)
2213 if (m1->match == m)
2215 rtx temp;
2217 /* Schedule the reg loaded by M1
2218 for replacement so that it shares the reg of M.
2219 If the modes differ (only possible in restricted
2220 circumstances), make a SUBREG.
2222 Note this assumes that the target dependent files
2223 treat REG and SUBREG equally, including within
2224 GO_IF_LEGITIMATE_ADDRESS and in all the
2225 predicates since we never verify that replacing the
2226 original register with a SUBREG results in a
2227 recognizable insn. */
2228 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2229 reg_map[m1->regno] = m->set_dest;
2230 else
2231 reg_map[m1->regno]
2232 = gen_lowpart_common (GET_MODE (m1->set_dest),
2233 m->set_dest);
2235 /* Get rid of the matching insn
2236 and prevent further processing of it. */
2237 m1->done = 1;
2239 /* If library call, delete all insns. */
2240 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2241 NULL_RTX)))
2242 delete_insn_chain (XEXP (temp, 0), m1->insn);
2243 else
2244 delete_insn (m1->insn);
2246 /* Any other movable that loads the same register
2247 MUST be moved. */
2248 already_moved[m1->regno] = 1;
2250 /* The reg merged here is now invariant,
2251 if the reg it matches is invariant. */
2252 if (! m->partial)
2254 int i;
2255 for (i = 0;
2256 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2257 i++)
2258 regs->array[m1->regno+i].set_in_loop = 0;
2262 else if (loop_dump_stream)
2263 fprintf (loop_dump_stream, "not desirable");
2265 else if (loop_dump_stream && !m->match)
2266 fprintf (loop_dump_stream, "not safe");
2268 if (loop_dump_stream)
2269 fprintf (loop_dump_stream, "\n");
2272 if (new_start == 0)
2273 new_start = loop_start;
2275 /* Go through all the instructions in the loop, making
2276 all the register substitutions scheduled in REG_MAP. */
2277 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2278 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2279 || GET_CODE (p) == CALL_INSN)
2281 replace_regs (PATTERN (p), reg_map, nregs, 0);
2282 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2283 INSN_CODE (p) = -1;
2286 /* Clean up. */
2287 free (reg_map);
2288 free (already_moved);
2292 static void
2293 loop_movables_add (movables, m)
2294 struct loop_movables *movables;
2295 struct movable *m;
2297 if (movables->head == 0)
2298 movables->head = m;
2299 else
2300 movables->last->next = m;
2301 movables->last = m;
2305 static void
2306 loop_movables_free (movables)
2307 struct loop_movables *movables;
2309 struct movable *m;
2310 struct movable *m_next;
2312 for (m = movables->head; m; m = m_next)
2314 m_next = m->next;
2315 free (m);
2319 #if 0
2320 /* Scan X and replace the address of any MEM in it with ADDR.
2321 REG is the address that MEM should have before the replacement. */
2323 static void
2324 replace_call_address (x, reg, addr)
2325 rtx x, reg, addr;
2327 enum rtx_code code;
2328 int i;
2329 const char *fmt;
2331 if (x == 0)
2332 return;
2333 code = GET_CODE (x);
2334 switch (code)
2336 case PC:
2337 case CC0:
2338 case CONST_INT:
2339 case CONST_DOUBLE:
2340 case CONST:
2341 case SYMBOL_REF:
2342 case LABEL_REF:
2343 case REG:
2344 return;
2346 case SET:
2347 /* Short cut for very common case. */
2348 replace_call_address (XEXP (x, 1), reg, addr);
2349 return;
2351 case CALL:
2352 /* Short cut for very common case. */
2353 replace_call_address (XEXP (x, 0), reg, addr);
2354 return;
2356 case MEM:
2357 /* If this MEM uses a reg other than the one we expected,
2358 something is wrong. */
2359 if (XEXP (x, 0) != reg)
2360 abort ();
2361 XEXP (x, 0) = addr;
2362 return;
2364 default:
2365 break;
2368 fmt = GET_RTX_FORMAT (code);
2369 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2371 if (fmt[i] == 'e')
2372 replace_call_address (XEXP (x, i), reg, addr);
2373 else if (fmt[i] == 'E')
2375 int j;
2376 for (j = 0; j < XVECLEN (x, i); j++)
2377 replace_call_address (XVECEXP (x, i, j), reg, addr);
2381 #endif
2383 /* Return the number of memory refs to addresses that vary
2384 in the rtx X. */
2386 static int
2387 count_nonfixed_reads (loop, x)
2388 const struct loop *loop;
2389 rtx x;
2391 enum rtx_code code;
2392 int i;
2393 const char *fmt;
2394 int value;
2396 if (x == 0)
2397 return 0;
2399 code = GET_CODE (x);
2400 switch (code)
2402 case PC:
2403 case CC0:
2404 case CONST_INT:
2405 case CONST_DOUBLE:
2406 case CONST:
2407 case SYMBOL_REF:
2408 case LABEL_REF:
2409 case REG:
2410 return 0;
2412 case MEM:
2413 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2414 + count_nonfixed_reads (loop, XEXP (x, 0)));
2416 default:
2417 break;
2420 value = 0;
2421 fmt = GET_RTX_FORMAT (code);
2422 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2424 if (fmt[i] == 'e')
2425 value += count_nonfixed_reads (loop, XEXP (x, i));
2426 if (fmt[i] == 'E')
2428 int j;
2429 for (j = 0; j < XVECLEN (x, i); j++)
2430 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2433 return value;
2436 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2437 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2438 `unknown_address_altered', `unknown_constant_address_altered', and
2439 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2440 list `store_mems' in LOOP. */
2442 static void
2443 prescan_loop (loop)
2444 struct loop *loop;
2446 int level = 1;
2447 rtx insn;
2448 struct loop_info *loop_info = LOOP_INFO (loop);
2449 rtx start = loop->start;
2450 rtx end = loop->end;
2451 /* The label after END. Jumping here is just like falling off the
2452 end of the loop. We use next_nonnote_insn instead of next_label
2453 as a hedge against the (pathological) case where some actual insn
2454 might end up between the two. */
2455 rtx exit_target = next_nonnote_insn (end);
2457 loop_info->has_indirect_jump = indirect_jump_in_function;
2458 loop_info->pre_header_has_call = 0;
2459 loop_info->has_call = 0;
2460 loop_info->has_nonconst_call = 0;
2461 loop_info->has_prefetch = 0;
2462 loop_info->has_volatile = 0;
2463 loop_info->has_tablejump = 0;
2464 loop_info->has_multiple_exit_targets = 0;
2465 loop->level = 1;
2467 loop_info->unknown_address_altered = 0;
2468 loop_info->unknown_constant_address_altered = 0;
2469 loop_info->store_mems = NULL_RTX;
2470 loop_info->first_loop_store_insn = NULL_RTX;
2471 loop_info->mems_idx = 0;
2472 loop_info->num_mem_sets = 0;
2473 /* If loop opts run twice, this was set on 1st pass for 2nd. */
2474 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2476 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2477 insn = PREV_INSN (insn))
2479 if (GET_CODE (insn) == CALL_INSN)
2481 loop_info->pre_header_has_call = 1;
2482 break;
2486 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2487 insn = NEXT_INSN (insn))
2489 switch (GET_CODE (insn))
2491 case NOTE:
2492 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2494 ++level;
2495 /* Count number of loops contained in this one. */
2496 loop->level++;
2498 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2499 --level;
2500 break;
2502 case CALL_INSN:
2503 if (! CONST_OR_PURE_CALL_P (insn))
2505 loop_info->unknown_address_altered = 1;
2506 loop_info->has_nonconst_call = 1;
2508 else if (pure_call_p (insn))
2509 loop_info->has_nonconst_call = 1;
2510 loop_info->has_call = 1;
2511 if (can_throw_internal (insn))
2512 loop_info->has_multiple_exit_targets = 1;
2513 break;
2515 case JUMP_INSN:
2516 if (! loop_info->has_multiple_exit_targets)
2518 rtx set = pc_set (insn);
2520 if (set)
2522 rtx src = SET_SRC (set);
2523 rtx label1, label2;
2525 if (GET_CODE (src) == IF_THEN_ELSE)
2527 label1 = XEXP (src, 1);
2528 label2 = XEXP (src, 2);
2530 else
2532 label1 = src;
2533 label2 = NULL_RTX;
2538 if (label1 && label1 != pc_rtx)
2540 if (GET_CODE (label1) != LABEL_REF)
2542 /* Something tricky. */
2543 loop_info->has_multiple_exit_targets = 1;
2544 break;
2546 else if (XEXP (label1, 0) != exit_target
2547 && LABEL_OUTSIDE_LOOP_P (label1))
2549 /* A jump outside the current loop. */
2550 loop_info->has_multiple_exit_targets = 1;
2551 break;
2555 label1 = label2;
2556 label2 = NULL_RTX;
2558 while (label1);
2560 else
2562 /* A return, or something tricky. */
2563 loop_info->has_multiple_exit_targets = 1;
2566 /* FALLTHRU */
2568 case INSN:
2569 if (volatile_refs_p (PATTERN (insn)))
2570 loop_info->has_volatile = 1;
2572 if (GET_CODE (insn) == JUMP_INSN
2573 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2574 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2575 loop_info->has_tablejump = 1;
2577 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2578 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2579 loop_info->first_loop_store_insn = insn;
2581 if (flag_non_call_exceptions && can_throw_internal (insn))
2582 loop_info->has_multiple_exit_targets = 1;
2583 break;
2585 default:
2586 break;
2590 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2591 if (/* An exception thrown by a called function might land us
2592 anywhere. */
2593 ! loop_info->has_nonconst_call
2594 /* We don't want loads for MEMs moved to a location before the
2595 one at which their stack memory becomes allocated. (Note
2596 that this is not a problem for malloc, etc., since those
2597 require actual function calls.) */
2598 && ! current_function_calls_alloca
2599 /* There are ways to leave the loop other than falling off the
2600 end. */
2601 && ! loop_info->has_multiple_exit_targets)
2602 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2603 insn = NEXT_INSN (insn))
2604 for_each_rtx (&insn, insert_loop_mem, loop_info);
2606 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2607 that loop_invariant_p and load_mems can use true_dependence
2608 to determine what is really clobbered. */
2609 if (loop_info->unknown_address_altered)
2611 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2613 loop_info->store_mems
2614 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2616 if (loop_info->unknown_constant_address_altered)
2618 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2620 RTX_UNCHANGING_P (mem) = 1;
2621 loop_info->store_mems
2622 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2626 /* Invalidate all loops containing LABEL. */
2628 static void
2629 invalidate_loops_containing_label (label)
2630 rtx label;
2632 struct loop *loop;
2633 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2634 loop->invalid = 1;
2637 /* Scan the function looking for loops. Record the start and end of each loop.
2638 Also mark as invalid loops any loops that contain a setjmp or are branched
2639 to from outside the loop. */
2641 static void
2642 find_and_verify_loops (f, loops)
2643 rtx f;
2644 struct loops *loops;
2646 rtx insn;
2647 rtx label;
2648 int num_loops;
2649 struct loop *current_loop;
2650 struct loop *next_loop;
2651 struct loop *loop;
2653 num_loops = loops->num;
2655 compute_luids (f, NULL_RTX, 0);
2657 /* If there are jumps to undefined labels,
2658 treat them as jumps out of any/all loops.
2659 This also avoids writing past end of tables when there are no loops. */
2660 uid_loop[0] = NULL;
2662 /* Find boundaries of loops, mark which loops are contained within
2663 loops, and invalidate loops that have setjmp. */
2665 num_loops = 0;
2666 current_loop = NULL;
2667 for (insn = f; insn; insn = NEXT_INSN (insn))
2669 if (GET_CODE (insn) == NOTE)
2670 switch (NOTE_LINE_NUMBER (insn))
2672 case NOTE_INSN_LOOP_BEG:
2673 next_loop = loops->array + num_loops;
2674 next_loop->num = num_loops;
2675 num_loops++;
2676 next_loop->start = insn;
2677 next_loop->outer = current_loop;
2678 current_loop = next_loop;
2679 break;
2681 case NOTE_INSN_LOOP_CONT:
2682 current_loop->cont = insn;
2683 break;
2685 case NOTE_INSN_LOOP_VTOP:
2686 current_loop->vtop = insn;
2687 break;
2689 case NOTE_INSN_LOOP_END:
2690 if (! current_loop)
2691 abort ();
2693 current_loop->end = insn;
2694 current_loop = current_loop->outer;
2695 break;
2697 default:
2698 break;
2701 if (GET_CODE (insn) == CALL_INSN
2702 && find_reg_note (insn, REG_SETJMP, NULL))
2704 /* In this case, we must invalidate our current loop and any
2705 enclosing loop. */
2706 for (loop = current_loop; loop; loop = loop->outer)
2708 loop->invalid = 1;
2709 if (loop_dump_stream)
2710 fprintf (loop_dump_stream,
2711 "\nLoop at %d ignored due to setjmp.\n",
2712 INSN_UID (loop->start));
2716 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2717 enclosing loop, but this doesn't matter. */
2718 uid_loop[INSN_UID (insn)] = current_loop;
2721 /* Any loop containing a label used in an initializer must be invalidated,
2722 because it can be jumped into from anywhere. */
2723 for (label = forced_labels; label; label = XEXP (label, 1))
2724 invalidate_loops_containing_label (XEXP (label, 0));
2726 /* Any loop containing a label used for an exception handler must be
2727 invalidated, because it can be jumped into from anywhere. */
2728 for_each_eh_label (invalidate_loops_containing_label);
2730 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2731 loop that it is not contained within, that loop is marked invalid.
2732 If any INSN or CALL_INSN uses a label's address, then the loop containing
2733 that label is marked invalid, because it could be jumped into from
2734 anywhere.
2736 Also look for blocks of code ending in an unconditional branch that
2737 exits the loop. If such a block is surrounded by a conditional
2738 branch around the block, move the block elsewhere (see below) and
2739 invert the jump to point to the code block. This may eliminate a
2740 label in our loop and will simplify processing by both us and a
2741 possible second cse pass. */
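/* A sketch of that transformation, in illustrative pseudo-RTL (labels
   and opcodes invented for the example): a loop containing

       (jump_insn: if cond goto L1)   <- P, conditional branch around block
       ... rarely executed block ...
       (jump_insn: goto Lexit)        <- INSN, unconditional exit jump
       L1: ... rest of loop ...

   becomes, after inverting P to target a new label and moving the
   block next to a BARRIER outside the loop,

       (jump_insn: if !cond goto Lnew)
       L1: ... rest of loop ...
       ...
       Lnew: ... rarely executed block ...
       (jump_insn: goto Lexit)

   so the block no longer occupies space inside the loop body.  */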
2743 for (insn = f; insn; insn = NEXT_INSN (insn))
2744 if (INSN_P (insn))
2746 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2748 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2750 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2751 if (note)
2752 invalidate_loops_containing_label (XEXP (note, 0));
2755 if (GET_CODE (insn) != JUMP_INSN)
2756 continue;
2758 mark_loop_jump (PATTERN (insn), this_loop);
2760 /* See if this is an unconditional branch outside the loop. */
2761 if (this_loop
2762 && (GET_CODE (PATTERN (insn)) == RETURN
2763 || (any_uncondjump_p (insn)
2764 && onlyjump_p (insn)
2765 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2766 != this_loop)))
2767 && get_max_uid () < max_uid_for_loop)
2769 rtx p;
2770 rtx our_next = next_real_insn (insn);
2771 rtx last_insn_to_move = NEXT_INSN (insn);
2772 struct loop *dest_loop;
2773 struct loop *outer_loop = NULL;
2775 /* Go backwards until we reach the start of the loop, a label,
2776 or a JUMP_INSN. */
2777 for (p = PREV_INSN (insn);
2778 GET_CODE (p) != CODE_LABEL
2779 && ! (GET_CODE (p) == NOTE
2780 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2781 && GET_CODE (p) != JUMP_INSN;
2782 p = PREV_INSN (p))
2785 /* Check for the case where we have a jump to an inner nested
2786 loop, and do not perform the optimization in that case. */
2788 if (JUMP_LABEL (insn))
2790 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2791 if (dest_loop)
2793 for (outer_loop = dest_loop; outer_loop;
2794 outer_loop = outer_loop->outer)
2795 if (outer_loop == this_loop)
2796 break;
2800 /* Make sure that the target of P is within the current loop. */
2802 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2803 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2804 outer_loop = this_loop;
2806 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2807 we have a block of code to try to move.
2809 We look backward and then forward from the target of INSN
2810 to find a BARRIER at the same loop depth as the target.
2811 If we find such a BARRIER, we make a new label for the start
2812 of the block, invert the jump in P and point it to that label,
2813 and move the block of code to the spot we found. */
2815 if (! outer_loop
2816 && GET_CODE (p) == JUMP_INSN
2817 && JUMP_LABEL (p) != 0
2818 /* Just ignore jumps to labels that were never emitted.
2819 These always indicate compilation errors. */
2820 && INSN_UID (JUMP_LABEL (p)) != 0
2821 && any_condjump_p (p) && onlyjump_p (p)
2822 && next_real_insn (JUMP_LABEL (p)) == our_next
2823 /* If it's not safe to move the sequence, then we
2824 mustn't try. */
2825 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2826 &last_insn_to_move))
2828 rtx target
2829 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2830 struct loop *target_loop = uid_loop[INSN_UID (target)];
2831 rtx loc, loc2;
2832 rtx tmp;
2834 /* Search for possible garbage past the conditional jumps
2835 and look for the last barrier. */
2836 for (tmp = last_insn_to_move;
2837 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2838 if (GET_CODE (tmp) == BARRIER)
2839 last_insn_to_move = tmp;
2841 for (loc = target; loc; loc = PREV_INSN (loc))
2842 if (GET_CODE (loc) == BARRIER
2843 /* Don't move things inside a tablejump. */
2844 && ((loc2 = next_nonnote_insn (loc)) == 0
2845 || GET_CODE (loc2) != CODE_LABEL
2846 || (loc2 = next_nonnote_insn (loc2)) == 0
2847 || GET_CODE (loc2) != JUMP_INSN
2848 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2849 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2850 && uid_loop[INSN_UID (loc)] == target_loop)
2851 break;
2853 if (loc == 0)
2854 for (loc = target; loc; loc = NEXT_INSN (loc))
2855 if (GET_CODE (loc) == BARRIER
2856 /* Don't move things inside a tablejump. */
2857 && ((loc2 = next_nonnote_insn (loc)) == 0
2858 || GET_CODE (loc2) != CODE_LABEL
2859 || (loc2 = next_nonnote_insn (loc2)) == 0
2860 || GET_CODE (loc2) != JUMP_INSN
2861 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2862 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2863 && uid_loop[INSN_UID (loc)] == target_loop)
2864 break;
2866 if (loc)
2868 rtx cond_label = JUMP_LABEL (p);
2869 rtx new_label = get_label_after (p);
2871 /* Ensure our label doesn't go away. */
2872 LABEL_NUSES (cond_label)++;
2874 /* Verify that uid_loop is large enough and that
2875 we can invert P. */
2876 if (invert_jump (p, new_label, 1))
2878 rtx q, r;
2880 /* If no suitable BARRIER was found, create a suitable
2881 one before TARGET. Since TARGET is a fall through
2882 path, we'll need to insert a jump around our block
2883 and add a BARRIER before TARGET.
2885 This creates an extra unconditional jump outside
2886 the loop. However, the benefits of removing rarely
2887 executed instructions from inside the loop usually
2888 outweigh the cost of the extra unconditional jump
2889 outside the loop. */
2890 if (loc == 0)
2892 rtx temp;
2894 temp = gen_jump (JUMP_LABEL (insn));
2895 temp = emit_jump_insn_before (temp, target);
2896 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2897 LABEL_NUSES (JUMP_LABEL (insn))++;
2898 loc = emit_barrier_before (target);
2901 /* Include the BARRIER after INSN and copy the
2902 block after LOC. */
2903 if (squeeze_notes (&new_label, &last_insn_to_move))
2904 abort ();
2905 reorder_insns (new_label, last_insn_to_move, loc);
2907 /* All those insns are now in TARGET_LOOP. */
2908 for (q = new_label;
2909 q != NEXT_INSN (last_insn_to_move);
2910 q = NEXT_INSN (q))
2911 uid_loop[INSN_UID (q)] = target_loop;
2913 /* The label jumped to by INSN is no longer a loop
2914 exit. Unless INSN does not have a label (e.g.,
2915 it is a RETURN insn), search loop->exit_labels
2916 to find its label_ref, and remove it. Also turn
2917 off LABEL_OUTSIDE_LOOP_P bit. */
2918 if (JUMP_LABEL (insn))
2920 for (q = 0, r = this_loop->exit_labels;
2922 q = r, r = LABEL_NEXTREF (r))
2923 if (XEXP (r, 0) == JUMP_LABEL (insn))
2925 LABEL_OUTSIDE_LOOP_P (r) = 0;
2926 if (q)
2927 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2928 else
2929 this_loop->exit_labels = LABEL_NEXTREF (r);
2930 break;
2933 for (loop = this_loop; loop && loop != target_loop;
2934 loop = loop->outer)
2935 loop->exit_count--;
2937 /* If we didn't find it, then something is
2938 wrong. */
2939 if (! r)
2940 abort ();
2943 /* P is now a jump outside the loop, so it must be put
2944 in loop->exit_labels, and marked as such.
2945 The easiest way to do this is to just call
2946 mark_loop_jump again for P. */
2947 mark_loop_jump (PATTERN (p), this_loop);
2949 /* If INSN now jumps to the insn after it,
2950 delete INSN. */
2951 if (JUMP_LABEL (insn) != 0
2952 && (next_real_insn (JUMP_LABEL (insn))
2953 == next_real_insn (insn)))
2954 delete_related_insns (insn);
2957 /* Continue the loop after where the conditional
2958 branch used to jump, since the only branch insn
2959 in the block (if it still remains) is an inter-loop
2960 branch and hence needs no processing. */
2961 insn = NEXT_INSN (cond_label);
2963 if (--LABEL_NUSES (cond_label) == 0)
2964 delete_related_insns (cond_label);
2966 /* This loop will be continued with NEXT_INSN (insn). */
2967 insn = PREV_INSN (insn);
2974 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2975 loops it is contained in, mark the target loop invalid.
2977 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2979 static void
2980 mark_loop_jump (x, loop)
2981 rtx x;
2982 struct loop *loop;
2984 struct loop *dest_loop;
2985 struct loop *outer_loop;
2986 int i;
2988 switch (GET_CODE (x))
2990 case PC:
2991 case USE:
2992 case CLOBBER:
2993 case REG:
2994 case MEM:
2995 case CONST_INT:
2996 case CONST_DOUBLE:
2997 case RETURN:
2998 return;
3000 case CONST:
3001 /* There could be a label reference in here. */
3002 mark_loop_jump (XEXP (x, 0), loop);
3003 return;
3005 case PLUS:
3006 case MINUS:
3007 case MULT:
3008 mark_loop_jump (XEXP (x, 0), loop);
3009 mark_loop_jump (XEXP (x, 1), loop);
3010 return;
3012 case LO_SUM:
3013 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3014 mark_loop_jump (XEXP (x, 1), loop);
3015 return;
3017 case SIGN_EXTEND:
3018 case ZERO_EXTEND:
3019 mark_loop_jump (XEXP (x, 0), loop);
3020 return;
3022 case LABEL_REF:
3023 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3025 /* Link together all labels that branch outside the loop. This
3026 is used by final_[bg]iv_value and the loop unrolling code. Also
3027 mark this LABEL_REF so we know that this branch should predict
3028 false. */
3030 /* A check to make sure the label is not in an inner nested loop,
3031 since this does not count as a loop exit. */
3032 if (dest_loop)
3034 for (outer_loop = dest_loop; outer_loop;
3035 outer_loop = outer_loop->outer)
3036 if (outer_loop == loop)
3037 break;
3039 else
3040 outer_loop = NULL;
3042 if (loop && ! outer_loop)
3044 LABEL_OUTSIDE_LOOP_P (x) = 1;
3045 LABEL_NEXTREF (x) = loop->exit_labels;
3046 loop->exit_labels = x;
3048 for (outer_loop = loop;
3049 outer_loop && outer_loop != dest_loop;
3050 outer_loop = outer_loop->outer)
3051 outer_loop->exit_count++;
3054 /* If this is inside a loop, but not in the current loop or one enclosed
3055 by it, it invalidates at least one loop. */
3057 if (! dest_loop)
3058 return;
3060 /* We must invalidate every nested loop containing the target of this
3061 label, except those that also contain the jump insn. */
3063 for (; dest_loop; dest_loop = dest_loop->outer)
3065 /* Stop when we reach a loop that also contains the jump insn. */
3066 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3067 if (dest_loop == outer_loop)
3068 return;
3070 /* If we get here, we know we need to invalidate a loop. */
3071 if (loop_dump_stream && ! dest_loop->invalid)
3072 fprintf (loop_dump_stream,
3073 "\nLoop at %d ignored due to multiple entry points.\n",
3074 INSN_UID (dest_loop->start));
3076 dest_loop->invalid = 1;
3078 return;
3080 case SET:
3081 /* If this is not setting pc, ignore. */
3082 if (SET_DEST (x) == pc_rtx)
3083 mark_loop_jump (SET_SRC (x), loop);
3084 return;
3086 case IF_THEN_ELSE:
3087 mark_loop_jump (XEXP (x, 1), loop);
3088 mark_loop_jump (XEXP (x, 2), loop);
3089 return;
3091 case PARALLEL:
3092 case ADDR_VEC:
3093 for (i = 0; i < XVECLEN (x, 0); i++)
3094 mark_loop_jump (XVECEXP (x, 0, i), loop);
3095 return;
3097 case ADDR_DIFF_VEC:
3098 for (i = 0; i < XVECLEN (x, 1); i++)
3099 mark_loop_jump (XVECEXP (x, 1, i), loop);
3100 return;
3102 default:
3103 /* Strictly speaking this is not a jump into the loop, only a possible
3104 jump out of the loop. However, we have no way to link the destination
3105 of this jump onto the list of exit labels. To be safe we mark this
3106 loop and any containing loops as invalid. */
3107 if (loop)
3109 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3111 if (loop_dump_stream && ! outer_loop->invalid)
3112 fprintf (loop_dump_stream,
3113 "\nLoop at %d ignored due to unknown exit jump.\n",
3114 INSN_UID (outer_loop->start));
3115 outer_loop->invalid = 1;
3118 return;
3122 /* Return nonzero if there is a label in the range from
3123 insn INSN to and including the insn whose luid is END.
3124 INSN must have an assigned luid (i.e., it must not have
3125 been previously created by loop.c). */
3127 static int
3128 labels_in_range_p (insn, end)
3129 rtx insn;
3130 int end;
3132 while (insn && INSN_LUID (insn) <= end)
3134 if (GET_CODE (insn) == CODE_LABEL)
3135 return 1;
3136 insn = NEXT_INSN (insn);
3139 return 0;
3142 /* Record that a memory reference X is being set. */
3144 static void
3145 note_addr_stored (x, y, data)
3146 rtx x;
3147 rtx y ATTRIBUTE_UNUSED;
3148 void *data ATTRIBUTE_UNUSED;
3150 struct loop_info *loop_info = data;
3152 if (x == 0 || GET_CODE (x) != MEM)
3153 return;
3155 /* Count number of memory writes.
3156 This affects heuristics in strength_reduce. */
3157 loop_info->num_mem_sets++;
3159 /* BLKmode MEM means all memory is clobbered. */
3160 if (GET_MODE (x) == BLKmode)
3162 if (RTX_UNCHANGING_P (x))
3163 loop_info->unknown_constant_address_altered = 1;
3164 else
3165 loop_info->unknown_address_altered = 1;
3167 return;
3170 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3171 loop_info->store_mems);
3174 /* X is a value modified by an INSN that references a biv inside a loop
3175 exit test (i.e., X is somehow related to the value of the biv). If X
3176 is a pseudo that is used more than once, then the biv is (effectively)
3177 used more than once. DATA is a pointer to a loop_regs structure. */
3179 static void
3180 note_set_pseudo_multiple_uses (x, y, data)
3181 rtx x;
3182 rtx y ATTRIBUTE_UNUSED;
3183 void *data;
3185 struct loop_regs *regs = (struct loop_regs *) data;
3187 if (x == 0)
3188 return;
3190 while (GET_CODE (x) == STRICT_LOW_PART
3191 || GET_CODE (x) == SIGN_EXTRACT
3192 || GET_CODE (x) == ZERO_EXTRACT
3193 || GET_CODE (x) == SUBREG)
3194 x = XEXP (x, 0);
3196 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3197 return;
3199 /* If we do not have usage information, or if we know the register
3200 is used more than once, note that fact for check_dbra_loop. */
3201 if (REGNO (x) >= max_reg_before_loop
3202 || ! regs->array[REGNO (x)].single_usage
3203 || regs->array[REGNO (x)].single_usage == const0_rtx)
3204 regs->multiple_uses = 1;
3207 /* Return nonzero if the rtx X is invariant over the current loop.
3209 The value is 2 if we refer to something only conditionally invariant.
3211 A memory ref is invariant if it is not volatile and does not conflict
3212 with anything stored in `loop_info->store_mems'. */
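/* For example, (const_int 4) and (symbol_ref "x") are invariant
   (value 1); a pseudo whose set_in_loop count is negative is only
   conditionally invariant (value 2); and a MEM is invariant only if
   it is not volatile, does not conflict with any recorded store, and
   has an invariant address.  */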
3215 loop_invariant_p (loop, x)
3216 const struct loop *loop;
3217 rtx x;
3219 struct loop_info *loop_info = LOOP_INFO (loop);
3220 struct loop_regs *regs = LOOP_REGS (loop);
3221 int i;
3222 enum rtx_code code;
3223 const char *fmt;
3224 int conditional = 0;
3225 rtx mem_list_entry;
3227 if (x == 0)
3228 return 1;
3229 code = GET_CODE (x);
3230 switch (code)
3232 case CONST_INT:
3233 case CONST_DOUBLE:
3234 case SYMBOL_REF:
3235 case CONST:
3236 return 1;
3238 case LABEL_REF:
3239 /* A LABEL_REF is normally invariant, however, if we are unrolling
3240 loops, and this label is inside the loop, then it isn't invariant.
3241 This is because each unrolled copy of the loop body will have
3242 a copy of this label. If this was invariant, then an insn loading
3243 the address of this label into a register might get moved outside
3244 the loop, and then each loop body would end up using the same label.
3246 We don't know the loop bounds here though, so just fail for all
3247 labels. */
3248 if (flag_unroll_loops)
3249 return 0;
3250 else
3251 return 1;
3253 case PC:
3254 case CC0:
3255 case UNSPEC_VOLATILE:
3256 return 0;
3258 case REG:
3259 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3260 since the reg might be set by initialization within the loop. */
3262 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3263 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3264 && ! current_function_has_nonlocal_goto)
3265 return 1;
3267 if (LOOP_INFO (loop)->has_call
3268 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3269 return 0;
3271 /* Out-of-range regs can occur when we are called from unrolling.
3272 These have always been created by the unroller and are set in
3273 the loop, hence are never invariant. */
3275 if (REGNO (x) >= (unsigned) regs->num)
3276 return 0;
3278 if (regs->array[REGNO (x)].set_in_loop < 0)
3279 return 2;
3281 return regs->array[REGNO (x)].set_in_loop == 0;
3283 case MEM:
3284 /* Volatile memory references must be rejected. Do this before
3285 checking for read-only items, so that volatile read-only items
3286 will be rejected also. */
3287 if (MEM_VOLATILE_P (x))
3288 return 0;
3290 /* See if there is any dependence between a store and this load. */
3291 mem_list_entry = loop_info->store_mems;
3292 while (mem_list_entry)
3294 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3295 x, rtx_varies_p))
3296 return 0;
3298 mem_list_entry = XEXP (mem_list_entry, 1);
3301 /* It's not invalidated by a store in memory
3302 but we must still verify the address is invariant. */
3303 break;
3305 case ASM_OPERANDS:
3306 /* Don't mess with insns declared volatile. */
3307 if (MEM_VOLATILE_P (x))
3308 return 0;
3309 break;
3311 default:
3312 break;
3315 fmt = GET_RTX_FORMAT (code);
3316 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3318 if (fmt[i] == 'e')
3320 int tem = loop_invariant_p (loop, XEXP (x, i));
3321 if (tem == 0)
3322 return 0;
3323 if (tem == 2)
3324 conditional = 1;
3326 else if (fmt[i] == 'E')
3328 int j;
3329 for (j = 0; j < XVECLEN (x, i); j++)
3331 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3332 if (tem == 0)
3333 return 0;
3334 if (tem == 2)
3335 conditional = 1;
3341 return 1 + conditional;
3344 /* Return nonzero if all the insns in the loop that set REG
3345 are INSN and the immediately following insns,
3346 and if each of those insns sets REG in an invariant way
3347 (not counting uses of REG in them).
3349 The value is 2 if some of these insns are only conditionally invariant.
3351 We assume that INSN itself is the first set of REG
3352 and that its source is invariant. */
3354 static int
3355 consec_sets_invariant_p (loop, reg, n_sets, insn)
3356 const struct loop *loop;
3357 int n_sets;
3358 rtx reg, insn;
3360 struct loop_regs *regs = LOOP_REGS (loop);
3361 rtx p = insn;
3362 unsigned int regno = REGNO (reg);
3363 rtx temp;
3364 /* Number of sets we have to insist on finding after INSN. */
3365 int count = n_sets - 1;
3366 int old = regs->array[regno].set_in_loop;
3367 int value = 0;
3368 int this;
3370 /* If N_SETS hit the limit, we can't rely on its value. */
3371 if (n_sets == 127)
3372 return 0;
3374 regs->array[regno].set_in_loop = 0;
3376 while (count > 0)
3378 enum rtx_code code;
3379 rtx set;
3381 p = NEXT_INSN (p);
3382 code = GET_CODE (p);
3384 /* If library call, skip to end of it. */
3385 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3386 p = XEXP (temp, 0);
3388 this = 0;
3389 if (code == INSN
3390 && (set = single_set (p))
3391 && GET_CODE (SET_DEST (set)) == REG
3392 && REGNO (SET_DEST (set)) == regno)
3394 this = loop_invariant_p (loop, SET_SRC (set));
3395 if (this != 0)
3396 value |= this;
3397 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3399 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3400 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3401 notes are OK. */
3402 this = (CONSTANT_P (XEXP (temp, 0))
3403 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3404 && loop_invariant_p (loop, XEXP (temp, 0))));
3405 if (this != 0)
3406 value |= this;
3409 if (this != 0)
3410 count--;
3411 else if (code != NOTE)
3413 regs->array[regno].set_in_loop = old;
3414 return 0;
3418 regs->array[regno].set_in_loop = old;
3419 /* If loop_invariant_p ever returned 2, we return 2. */
3420 return 1 + (value & 2);
3423 #if 0
3424 /* I don't think this condition is sufficient to allow INSN
3425 to be moved, so we no longer test it. */
3427 /* Return 1 if all insns in the basic block of INSN and following INSN
3428 that set REG are invariant according to TABLE. */
3430 static int
3431 all_sets_invariant_p (reg, insn, table)
3432 rtx reg, insn;
3433 short *table;
3435 rtx p = insn;
3436 int regno = REGNO (reg);
3438 while (1)
3440 enum rtx_code code;
3441 p = NEXT_INSN (p);
3442 code = GET_CODE (p);
3443 if (code == CODE_LABEL || code == JUMP_INSN)
3444 return 1;
3445 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3446 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3447 && REGNO (SET_DEST (PATTERN (p))) == regno)
3449 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3450 return 0;
3454 #endif /* 0 */
3456 /* Look at all uses (not sets) of registers in X. For each, if it is
3457 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3458 a different insn, set USAGE[REGNO] to const0_rtx. */
3460 static void
3461 find_single_use_in_loop (regs, insn, x)
3462 struct loop_regs *regs;
3463 rtx insn;
3464 rtx x;
3466 enum rtx_code code = GET_CODE (x);
3467 const char *fmt = GET_RTX_FORMAT (code);
3468 int i, j;
3470 if (code == REG)
3471 regs->array[REGNO (x)].single_usage
3472 = (regs->array[REGNO (x)].single_usage != 0
3473 && regs->array[REGNO (x)].single_usage != insn)
3474 ? const0_rtx : insn;
3476 else if (code == SET)
3478 /* Don't count SET_DEST if it is a REG; otherwise count things
3479 in SET_DEST because if a register is partially modified, it won't
3480 show up as a potential movable so we don't care how USAGE is set
3481 for it. */
3482 if (GET_CODE (SET_DEST (x)) != REG)
3483 find_single_use_in_loop (regs, insn, SET_DEST (x));
3484 find_single_use_in_loop (regs, insn, SET_SRC (x));
3486 else
3487 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3489 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3490 find_single_use_in_loop (regs, insn, XEXP (x, i));
3491 else if (fmt[i] == 'E')
3492 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3493 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3497 /* Count and record any set in X which is contained in INSN. Update
3498 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3499 in X. */
3501 static void
3502 count_one_set (regs, insn, x, last_set)
3503 struct loop_regs *regs;
3504 rtx insn, x;
3505 rtx *last_set;
3507 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3508 /* Don't move a reg that has an explicit clobber.
3509 It's not worth the pain to try to do it correctly. */
3510 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3512 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3514 rtx dest = SET_DEST (x);
3515 while (GET_CODE (dest) == SUBREG
3516 || GET_CODE (dest) == ZERO_EXTRACT
3517 || GET_CODE (dest) == SIGN_EXTRACT
3518 || GET_CODE (dest) == STRICT_LOW_PART)
3519 dest = XEXP (dest, 0);
3520 if (GET_CODE (dest) == REG)
3522 int i;
3523 int regno = REGNO (dest);
3524 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3526 /* If this is the first setting of this reg
3527 in current basic block, and it was set before,
3528 it must be set in two basic blocks, so it cannot
3529 be moved out of the loop. */
3530 if (regs->array[regno].set_in_loop > 0
3531 && last_set[regno] == 0)
3532 regs->array[regno+i].may_not_optimize = 1;
3533 /* If this is not first setting in current basic block,
3534 see if reg was used in between previous one and this.
3535 If so, neither one can be moved. */
3536 if (last_set[regno] != 0
3537 && reg_used_between_p (dest, last_set[regno], insn))
3538 regs->array[regno+i].may_not_optimize = 1;
3539 if (regs->array[regno+i].set_in_loop < 127)
3540 ++regs->array[regno+i].set_in_loop;
3541 last_set[regno+i] = insn;
3547 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3548 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3549 contained in insn INSN is used by any insn that precedes INSN in
3550 cyclic order starting from the loop entry point.
3552 We don't want to use INSN_LUID here because if we restrict INSN to those
3553 that have a valid INSN_LUID, it means we cannot move an invariant out
3554 from an inner loop past two loops. */
3556 static int
3557 loop_reg_used_before_p (loop, set, insn)
3558 const struct loop *loop;
3559 rtx set, insn;
3561 rtx reg = SET_DEST (set);
3562 rtx p;
3564 /* Scan forward checking for register usage. If we hit INSN, we
3565 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3566 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3568 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3569 return 1;
3571 if (p == loop->end)
3572 p = loop->start;
3575 return 0;
3579 /* Information we collect about arrays that we might want to prefetch. */
3580 struct prefetch_info
3582 struct iv_class *class; /* Class this prefetch is based on. */
3583 struct induction *giv; /* GIV this prefetch is based on. */
3584 rtx base_address; /* Start prefetching from this address plus
3585 index. */
3586 HOST_WIDE_INT index;
3587 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3588 iteration. */
3589 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3590 prefetch area in one iteration. */
3591 unsigned int total_bytes; /* Total bytes loop will access in this block.
3592 This is set only for loops with known
3593 iteration counts and is 0xffffffff
3594 otherwise. */
3595 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3596 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3597 unsigned int write : 1; /* 1 for read/write prefetches. */
3600 /* Data used by check_store function. */
3601 struct check_store_data
3603 rtx mem_address;
3604 int mem_write;
3607 static void check_store PARAMS ((rtx, rtx, void *));
3608 static void emit_prefetch_instructions PARAMS ((struct loop *));
3609 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3611 /* Set mem_write when mem_address is found. Used as callback to
3612 note_stores. */
3613 static void
3614 check_store (x, pat, data)
3615 rtx x, pat ATTRIBUTE_UNUSED;
3616 void *data;
3618 struct check_store_data *d = (struct check_store_data *) data;
3620 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3621 d->mem_write = 1;
3624 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3625 important to get some addresses combined. Later more sophisticated
3626 transformations can be added when necessary.
3628 ??? The same trick of swapping operands is done at several other places.
3629 It would be nice to develop some common way to handle this. */
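/* For example (register numbers invented), (plus (reg 100) (reg 101))
   and (plus (reg 101) (reg 100)) compare equal here, because PLUS is
   commutative and both operand orders are tried.  */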
3631 static int
3632 rtx_equal_for_prefetch_p (x, y)
3633 rtx x, y;
3635 int i;
3636 int j;
3637 enum rtx_code code = GET_CODE (x);
3638 const char *fmt;
3640 if (x == y)
3641 return 1;
3642 if (code != GET_CODE (y))
3643 return 0;
3645 code = GET_CODE (x);
3647 if (GET_RTX_CLASS (code) == 'c')
3649 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3650 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3651 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3652 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3654 /* Compare the elements. If any pair of corresponding elements fails to
3655 match, return 0 for the whole thing. */
3657 fmt = GET_RTX_FORMAT (code);
3658 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3660 switch (fmt[i])
3662 case 'w':
3663 if (XWINT (x, i) != XWINT (y, i))
3664 return 0;
3665 break;
3667 case 'i':
3668 if (XINT (x, i) != XINT (y, i))
3669 return 0;
3670 break;
3672 case 'E':
3673 /* Two vectors must have the same length. */
3674 if (XVECLEN (x, i) != XVECLEN (y, i))
3675 return 0;
3677 /* And the corresponding elements must match. */
3678 for (j = 0; j < XVECLEN (x, i); j++)
3679 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3680 XVECEXP (y, i, j)) == 0)
3681 return 0;
3682 break;
3684 case 'e':
3685 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3686 return 0;
3687 break;
3689 case 's':
3690 if (strcmp (XSTR (x, i), XSTR (y, i)))
3691 return 0;
3692 break;
3694 case 'u':
3695 /* These are just backpointers, so they don't matter. */
3696 break;
3698 case '0':
3699 break;
3701 /* It is believed that rtx's at this level will never
3702 contain anything but integers and other rtx's,
3703 except for within LABEL_REFs and SYMBOL_REFs. */
3704 default:
3705 abort ();
3708 return 1;
3711 /* Remove the constant addition value from the expression X (when present)
3712    and return it. */
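/* For example (illustrative only):
      (plus (reg 100) (const_int 8))
	 -> *X becomes (reg 100), 8 is returned;
      (const (plus (symbol_ref "a") (const_int 4)))
	 -> *X becomes (symbol_ref "a"), 4 is returned.
   An expression with no constant part is left untouched and 0 is
   returned. */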
3714 static HOST_WIDE_INT
3715 remove_constant_addition (x)
3716 rtx *x;
3718 HOST_WIDE_INT addval = 0;
3719 rtx exp = *x;
3721 /* Avoid clobbering a shared CONST expression. */
3722 if (GET_CODE (exp) == CONST)
3724 if (GET_CODE (XEXP (exp, 0)) == PLUS
3725 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3726 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3728 *x = XEXP (XEXP (exp, 0), 0);
3729 return INTVAL (XEXP (XEXP (exp, 0), 1));
3731 return 0;
3734 if (GET_CODE (exp) == CONST_INT)
3736 addval = INTVAL (exp);
3737 *x = const0_rtx;
3740   /* For a PLUS expression, recurse on both operands. */
3741 else if (GET_CODE (exp) == PLUS)
3743 addval += remove_constant_addition (&XEXP (exp, 0));
3744 addval += remove_constant_addition (&XEXP (exp, 1));
3746       /* If either operand turned out to be constant, remove the resulting
3747 	 extra zero from the expression. */
3748 if (XEXP (exp, 0) == const0_rtx)
3749 *x = XEXP (exp, 1);
3750 else if (XEXP (exp, 1) == const0_rtx)
3751 *x = XEXP (exp, 0);
3754 return addval;
3757 /* Attempt to identify accesses to arrays that are most likely to cause cache
3758    misses, and emit prefetch instructions a few prefetch blocks forward.
3760    To detect the arrays we use the GIV information that was collected by the
3761    strength reduction pass.
3763    The prefetch instructions are generated after the GIV information has been
3764    collected and before the strength reduction process.  The new GIVs are
3765    injected into the strength reduction tables, so the prefetch addresses are
3766    optimized as well.
3768    Each GIV is split into a base address, a stride, and a constant addition
3769    value.  GIVs with the same base address and stride, and with close addition
3770    values, are combined into a single prefetch.  Writes to GIVs are also
3771    detected, so that prefetch-for-write instructions can be used for the block
3772    we write to, on machines that support write prefetches.
3774    Several heuristics are used to determine when to prefetch.  They are
3775    controlled by defined symbols that can be overridden for each target. */
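/* As a source-level illustration (not part of the pass itself): in

	for (i = 0; i < n; i++)
	  s += a[i] + a[i + 4];

   the two address GIVs share the base address `a' and a stride of
   sizeof (*a) bytes per iteration; their constant addition values differ
   only by 4 * sizeof (*a).  Because that difference is small, the two
   accesses are combined and a single prefetch stream is generated for
   the array. */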
3777 static void
3778 emit_prefetch_instructions (loop)
3779 struct loop *loop;
3781 int num_prefetches = 0;
3782 int num_real_prefetches = 0;
3783 int num_real_write_prefetches = 0;
3784 int num_prefetches_before = 0;
3785 int num_write_prefetches_before = 0;
3786 int ahead = 0;
3787 int i;
3788 struct iv_class *bl;
3789 struct induction *iv;
3790 struct prefetch_info info[MAX_PREFETCHES];
3791 struct loop_ivs *ivs = LOOP_IVS (loop);
3793 if (!HAVE_prefetch)
3794 return;
3796   /* Consider only loops without calls.  When a call is made, the loop is
3797      probably slow enough that the memory can be read without prefetching. */
3798 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3800 if (loop_dump_stream)
3801 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3803 return;
3806 /* Don't prefetch in loops known to have few iterations. */
3807 if (PREFETCH_NO_LOW_LOOPCNT
3808 && LOOP_INFO (loop)->n_iterations
3809 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3811 if (loop_dump_stream)
3812 fprintf (loop_dump_stream,
3813 "Prefetch: ignoring loop: not enough iterations.\n");
3814 return;
3817 /* Search all induction variables and pick those interesting for the prefetch
3818 machinery. */
3819 for (bl = ivs->list; bl; bl = bl->next)
3821 struct induction *biv = bl->biv, *biv1;
3822 int basestride = 0;
3824 biv1 = biv;
3826 /* Expect all BIVs to be executed in each iteration. This makes our
3827 analysis more conservative. */
3828 while (biv1)
3830 /* Discard non-constant additions that we can't handle well yet, and
3831 BIVs that are executed multiple times; such BIVs ought to be
3832 handled in the nested loop. We accept not_every_iteration BIVs,
3833 since these only result in larger strides and make our
3834 heuristics more conservative. */
3835 if (GET_CODE (biv->add_val) != CONST_INT)
3837 if (loop_dump_stream)
3839 fprintf (loop_dump_stream,
3840 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3841 REGNO (biv->src_reg), INSN_UID (biv->insn));
3842 print_rtl (loop_dump_stream, biv->add_val);
3843 fprintf (loop_dump_stream, "\n");
3845 break;
3848 if (biv->maybe_multiple)
3850 if (loop_dump_stream)
3852 fprintf (loop_dump_stream,
3853 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3854 REGNO (biv->src_reg), INSN_UID (biv->insn));
3855 print_rtl (loop_dump_stream, biv->add_val);
3856 fprintf (loop_dump_stream, "\n");
3858 break;
3861 basestride += INTVAL (biv1->add_val);
3862 biv1 = biv1->next_iv;
3865 if (biv1 || !basestride)
3866 continue;
3868 for (iv = bl->giv; iv; iv = iv->next_iv)
3870 rtx address;
3871 rtx temp;
3872 HOST_WIDE_INT index = 0;
3873 int add = 1;
3874 HOST_WIDE_INT stride = 0;
3875 int stride_sign = 1;
3876 struct check_store_data d;
3877 const char *ignore_reason = NULL;
3878 int size = GET_MODE_SIZE (GET_MODE (iv));
3880 /* See whether an induction variable is interesting to us and if
3881 not, report the reason. */
3882 if (iv->giv_type != DEST_ADDR)
3883 ignore_reason = "giv is not a destination address";
3885 /* We are interested only in constant stride memory references
3886 in order to be able to compute density easily. */
3887 else if (GET_CODE (iv->mult_val) != CONST_INT)
3888 ignore_reason = "stride is not constant";
3890 else
3892 stride = INTVAL (iv->mult_val) * basestride;
3893 if (stride < 0)
3895 stride = -stride;
3896 stride_sign = -1;
3899 /* On some targets, reversed order prefetches are not
3900 worthwhile. */
3901 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3902 ignore_reason = "reversed order stride";
3904 /* Prefetch of accesses with an extreme stride might not be
3905 worthwhile, either. */
3906 else if (PREFETCH_NO_EXTREME_STRIDE
3907 && stride > PREFETCH_EXTREME_STRIDE)
3908 ignore_reason = "extreme stride";
3910 /* Ignore GIVs with varying add values; we can't predict the
3911 value for the next iteration. */
3912 else if (!loop_invariant_p (loop, iv->add_val))
3913 ignore_reason = "giv has varying add value";
3915 /* Ignore GIVs in the nested loops; they ought to have been
3916 handled already. */
3917 else if (iv->maybe_multiple)
3918 ignore_reason = "giv is in nested loop";
3921 if (ignore_reason != NULL)
3923 if (loop_dump_stream)
3924 fprintf (loop_dump_stream,
3925 "Prefetch: ignoring giv at %d: %s.\n",
3926 INSN_UID (iv->insn), ignore_reason);
3927 continue;
3930 /* Determine the pointer to the basic array we are examining. It is
3931 the sum of the BIV's initial value and the GIV's add_val. */
3932 address = copy_rtx (iv->add_val);
3933 temp = copy_rtx (bl->initial_value);
3935 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3936 index = remove_constant_addition (&address);
3938 d.mem_write = 0;
3939 d.mem_address = *iv->location;
3941 /* When the GIV is not always executed, we might be better off by
3942 not dirtying the cache pages. */
3943 if (PREFETCH_CONDITIONAL || iv->always_executed)
3944 note_stores (PATTERN (iv->insn), check_store, &d);
3945 else
3947 if (loop_dump_stream)
3948 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3949 INSN_UID (iv->insn), "in conditional code.");
3950 continue;
3953 /* Attempt to find another prefetch to the same array and see if we
3954 can merge this one. */
3955 for (i = 0; i < num_prefetches; i++)
3956 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3957 && stride == info[i].stride)
3959 	      /* If both access the same array (the same location, differing
3960 		 only by a small constant index), merge the prefetches.  Keep
3961 		 only the later one; the earlier address will be covered by
3962 		 the prefetch issued on the previous iteration.
3963 		 The artificial threshold should not be too small, but also
3964 		 not bigger than the small portion of memory usually traversed
3965 		 by a single loop. */
3966 if (index >= info[i].index
3967 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3969 info[i].write |= d.mem_write;
3970 info[i].bytes_accessed += size;
3971 info[i].index = index;
3972 info[i].giv = iv;
3973 info[i].class = bl;
3974 info[num_prefetches].base_address = address;
3975 add = 0;
3976 break;
3979 if (index < info[i].index
3980 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3982 info[i].write |= d.mem_write;
3983 info[i].bytes_accessed += size;
3984 add = 0;
3985 break;
3989 /* Merging failed. */
3990 if (add)
3992 info[num_prefetches].giv = iv;
3993 info[num_prefetches].class = bl;
3994 info[num_prefetches].index = index;
3995 info[num_prefetches].stride = stride;
3996 info[num_prefetches].base_address = address;
3997 info[num_prefetches].write = d.mem_write;
3998 info[num_prefetches].bytes_accessed = size;
3999 num_prefetches++;
4000 if (num_prefetches >= MAX_PREFETCHES)
4002 if (loop_dump_stream)
4003 fprintf (loop_dump_stream,
4004 "Maximal number of prefetches exceeded.\n");
4005 return;
4011 for (i = 0; i < num_prefetches; i++)
4013 int density;
4015 /* Attempt to calculate the total number of bytes fetched by all
4016 iterations of the loop. Avoid overflow. */
4017 if (LOOP_INFO (loop)->n_iterations
4018 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4019 >= LOOP_INFO (loop)->n_iterations))
4020 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4021 else
4022 info[i].total_bytes = 0xffffffff;
4024 density = info[i].bytes_accessed * 100 / info[i].stride;
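      /* Illustrative example: a loop that reads one 4-byte element per
	 iteration with a 16-byte stride touches 4 of every 16 bytes, so
	 DENSITY is 4 * 100 / 16 == 25 (i.e. 25%). */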
4026 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4027 if (PREFETCH_ONLY_DENSE_MEM)
4028 if (density * 256 > PREFETCH_DENSE_MEM * 100
4029 && (info[i].total_bytes / PREFETCH_BLOCK
4030 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4032 info[i].prefetch_before_loop = 1;
4033 info[i].prefetch_in_loop
4034 = (info[i].total_bytes / PREFETCH_BLOCK
4035 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4037 else
4039 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4040 if (loop_dump_stream)
4041 fprintf (loop_dump_stream,
4042 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4043 INSN_UID (info[i].giv->insn), density);
4045 else
4046 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4048 /* Find how many prefetch instructions we'll use within the loop. */
4049 if (info[i].prefetch_in_loop != 0)
4051 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4052 / PREFETCH_BLOCK);
4053 num_real_prefetches += info[i].prefetch_in_loop;
4054 if (info[i].write)
4055 num_real_write_prefetches += info[i].prefetch_in_loop;
4059 /* Determine how many iterations ahead to prefetch within the loop, based
4060 on how many prefetches we currently expect to do within the loop. */
4061 if (num_real_prefetches != 0)
4063 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4065 if (loop_dump_stream)
4066 fprintf (loop_dump_stream,
4067 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4068 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4069 num_real_prefetches = 0, num_real_write_prefetches = 0;
4072 /* We'll also use AHEAD to determine how many prefetch instructions to
4073 emit before a loop, so don't leave it zero. */
4074 if (ahead == 0)
4075 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4077 for (i = 0; i < num_prefetches; i++)
4079 /* Update if we've decided not to prefetch anything within the loop. */
4080 if (num_real_prefetches == 0)
4081 info[i].prefetch_in_loop = 0;
4083 /* Find how many prefetch instructions we'll use before the loop. */
4084 if (info[i].prefetch_before_loop != 0)
4086 int n = info[i].total_bytes / PREFETCH_BLOCK;
4087 if (n > ahead)
4088 n = ahead;
4089 info[i].prefetch_before_loop = n;
4090 num_prefetches_before += n;
4091 if (info[i].write)
4092 num_write_prefetches_before += n;
4095 if (loop_dump_stream)
4097 if (info[i].prefetch_in_loop == 0
4098 && info[i].prefetch_before_loop == 0)
4099 continue;
4100 fprintf (loop_dump_stream, "Prefetch insn: %d",
4101 INSN_UID (info[i].giv->insn));
4102 fprintf (loop_dump_stream,
4103 "; in loop: %d; before: %d; %s\n",
4104 info[i].prefetch_in_loop,
4105 info[i].prefetch_before_loop,
4106 info[i].write ? "read/write" : "read only");
4107 fprintf (loop_dump_stream,
4108 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4109 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4110 info[i].bytes_accessed, info[i].total_bytes);
4111 fprintf (loop_dump_stream, " index: ");
4112 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4113 fprintf (loop_dump_stream, "; stride: ");
4114 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4115 fprintf (loop_dump_stream, "; address: ");
4116 print_rtl (loop_dump_stream, info[i].base_address);
4117 fprintf (loop_dump_stream, "\n");
4121 if (num_real_prefetches + num_prefetches_before > 0)
4123 /* Record that this loop uses prefetch instructions. */
4124 LOOP_INFO (loop)->has_prefetch = 1;
4126 if (loop_dump_stream)
4128 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4129 num_real_prefetches, num_real_write_prefetches);
4130 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4131 num_prefetches_before, num_write_prefetches_before);
4135 for (i = 0; i < num_prefetches; i++)
4137 int y;
4139 for (y = 0; y < info[i].prefetch_in_loop; y++)
4141 rtx loc = copy_rtx (*info[i].giv->location);
4142 rtx insn;
4143 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4144 rtx before_insn = info[i].giv->insn;
4145 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4146 rtx seq;
4148 /* We can save some effort by offsetting the address on
4149 architectures with offsettable memory references. */
4150 if (offsettable_address_p (0, VOIDmode, loc))
4151 loc = plus_constant (loc, bytes_ahead);
4152 else
4154 rtx reg = gen_reg_rtx (Pmode);
4155 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4156 GEN_INT (bytes_ahead), reg,
4157 0, before_insn);
4158 loc = reg;
4161 start_sequence ();
4162 /* Make sure the address operand is valid for prefetch. */
4163 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4164 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4165 loc = force_reg (Pmode, loc);
4166 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4167 GEN_INT (3)));
4168 seq = get_insns ();
4169 end_sequence ();
4170 emit_insn_before (seq, before_insn);
4172 /* Check all insns emitted and record the new GIV
4173 information. */
4174 insn = NEXT_INSN (prev_insn);
4175 while (insn != before_insn)
4177 insn = check_insn_for_givs (loop, insn,
4178 info[i].giv->always_executed,
4179 info[i].giv->maybe_multiple);
4180 insn = NEXT_INSN (insn);
4184 if (PREFETCH_BEFORE_LOOP)
4186 /* Emit insns before the loop to fetch the first cache lines or,
4187 if we're not prefetching within the loop, everything we expect
4188 to need. */
4189 for (y = 0; y < info[i].prefetch_before_loop; y++)
4191 rtx reg = gen_reg_rtx (Pmode);
4192 rtx loop_start = loop->start;
4193 rtx init_val = info[i].class->initial_value;
4194 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4195 info[i].giv->add_val,
4196 GEN_INT (y * PREFETCH_BLOCK));
4198 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4199 non-constant INIT_VAL to have the same mode as REG, which
4200 in this case we know to be Pmode. */
4201 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4203 rtx seq;
4205 start_sequence ();
4206 init_val = convert_to_mode (Pmode, init_val, 0);
4207 seq = get_insns ();
4208 end_sequence ();
4209 loop_insn_emit_before (loop, 0, loop_start, seq);
4211 loop_iv_add_mult_emit_before (loop, init_val,
4212 info[i].giv->mult_val,
4213 add_val, reg, 0, loop_start);
4214 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4215 GEN_INT (3)),
4216 loop_start);
4221 return;
4224 /* A "basic induction variable" or biv is a pseudo reg that is set
4225 (within this loop) only by incrementing or decrementing it. */
4226 /* A "general induction variable" or giv is a pseudo reg whose
4227 value is a linear function of a biv. */
4229 /* Bivs are recognized by `basic_induction_var';
4230 Givs by `general_induction_var'. */
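/* As a source-level illustration (not used by the code below): in

	for (i = 0; i < n; i++)
	  x[i] = 0;

   the counter I is a biv (it is changed only by adding a constant on each
   iteration), while the address of x[i], i.e. x + i * sizeof (*x), is a
   giv (a linear function of the biv I). */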
4232 /* Communication with routines called via `note_stores'. */
4234 static rtx note_insn;
4236 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4238 static rtx addr_placeholder;
4240 /* ??? Unfinished optimizations, and possible future optimizations,
4241 for the strength reduction code. */
4243 /* ??? The interaction of biv elimination, and recognition of 'constant'
4244 bivs, may cause problems. */
4246 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4247 performance problems.
4249 Perhaps don't eliminate things that can be combined with an addressing
4250 mode. Find all givs that have the same biv, mult_val, and add_val;
4251 then for each giv, check to see if its only use dies in a following
4252 memory address. If so, generate a new memory address and check to see
4253 if it is valid. If it is valid, then store the modified memory address,
4254 otherwise, mark the giv as not done so that it will get its own iv. */
4256 /* ??? Could try to optimize branches when it is known that a biv is always
4257 positive. */
4259 /* ??? When replacing a biv in a compare insn, we should replace it with the
4260    closest giv so that an optimized branch can still be recognized by the
4261    combiner, e.g. the VAX acb insn. */
4263 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4264 was rerun in loop_optimize whenever a register was added or moved.
4265 Also, some of the optimizations could be a little less conservative. */
4267 /* Scan the loop body and call FNCALL for each insn.  In addition to the
4268    LOOP and INSN parameters, pass NOT_EVERY_ITERATION and MAYBE_MULTIPLE to
4269    the callback.
4271    NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed
4272    at least once in every loop iteration except for the last one.
4274    MAYBE_MULTIPLE is 1 if the current insn may be executed more than once in
4275    each loop iteration. */
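/* For example (illustrative only): when scanning the outer loop of

	for (i = 0; i < n; i++)
	  {
	    if (p[i])
	      q[i] = 0;
	    for (j = 0; j < m; j++)
	      r[j] = 0;
	  }

   the store to q[i] is passed to the callback with NOT_EVERY_ITERATION set
   (a conditional jump may skip it), and the body of the inner loop is
   passed with MAYBE_MULTIPLE set (a backward jump may execute it several
   times per outer iteration). */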
4277 void
4278 for_each_insn_in_loop (loop, fncall)
4279 struct loop *loop;
4280 loop_insn_callback fncall;
4282 int not_every_iteration = 0;
4283 int maybe_multiple = 0;
4284 int past_loop_latch = 0;
4285 int loop_depth = 0;
4286 rtx p;
4288 /* If loop_scan_start points to the loop exit test, we have to be wary of
4289 subversive use of gotos inside expression statements. */
4290 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4291 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4293 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4294 for (p = next_insn_in_loop (loop, loop->scan_start);
4295 p != NULL_RTX;
4296 p = next_insn_in_loop (loop, p))
4298 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4300 /* Past CODE_LABEL, we get to insns that may be executed multiple
4301 times. The only way we can be sure that they can't is if every
4302 jump insn between here and the end of the loop either
4303 returns, exits the loop, is a jump to a location that is still
4304 behind the label, or is a jump to the loop start. */
4306 if (GET_CODE (p) == CODE_LABEL)
4308 rtx insn = p;
4310 maybe_multiple = 0;
4312 while (1)
4314 insn = NEXT_INSN (insn);
4315 if (insn == loop->scan_start)
4316 break;
4317 if (insn == loop->end)
4319 if (loop->top != 0)
4320 insn = loop->top;
4321 else
4322 break;
4323 if (insn == loop->scan_start)
4324 break;
4327 if (GET_CODE (insn) == JUMP_INSN
4328 && GET_CODE (PATTERN (insn)) != RETURN
4329 && (!any_condjump_p (insn)
4330 || (JUMP_LABEL (insn) != 0
4331 && JUMP_LABEL (insn) != loop->scan_start
4332 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4334 maybe_multiple = 1;
4335 break;
4340 /* Past a jump, we get to insns for which we can't count
4341 on whether they will be executed during each iteration. */
4342 /* This code appears twice in strength_reduce. There is also similar
4343 code in scan_loop. */
4344 if (GET_CODE (p) == JUMP_INSN
4345 /* If we enter the loop in the middle, and scan around to the
4346 beginning, don't set not_every_iteration for that.
4347 This can be any kind of jump, since we want to know if insns
4348 will be executed if the loop is executed. */
4349 && !(JUMP_LABEL (p) == loop->top
4350 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4351 && any_uncondjump_p (p))
4352 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4354 rtx label = 0;
4356 /* If this is a jump outside the loop, then it also doesn't
4357 matter. Check to see if the target of this branch is on the
4358 	 loop->exit_labels list. */
4360 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4361 if (XEXP (label, 0) == JUMP_LABEL (p))
4362 break;
4364 if (!label)
4365 not_every_iteration = 1;
4368 else if (GET_CODE (p) == NOTE)
4370 /* At the virtual top of a converted loop, insns are again known to
4371 be executed each iteration: logically, the loop begins here
4372 even though the exit code has been duplicated.
4374 Insns are also again known to be executed each iteration at
4375 the LOOP_CONT note. */
4376 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4377 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4378 && loop_depth == 0)
4379 not_every_iteration = 0;
4380 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4381 loop_depth++;
4382 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4383 loop_depth--;
4386 /* Note if we pass a loop latch. If we do, then we can not clear
4387 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4388 a loop since a jump before the last CODE_LABEL may have started
4389 a new loop iteration.
4391 Note that LOOP_TOP is only set for rotated loops and we need
4392 this check for all loops, so compare against the CODE_LABEL
4393 which immediately follows LOOP_START. */
4394 if (GET_CODE (p) == JUMP_INSN
4395 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4396 past_loop_latch = 1;
4398 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4399 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4400 or not an insn is known to be executed each iteration of the
4401 loop, whether or not any iterations are known to occur.
4403 Therefore, if we have just passed a label and have no more labels
4404 between here and the test insn of the loop, and we have not passed
4405 a jump to the top of the loop, then we know these insns will be
4406 executed each iteration. */
4408 if (not_every_iteration
4409 && !past_loop_latch
4410 && GET_CODE (p) == CODE_LABEL
4411 && no_labels_between_p (p, loop->end)
4412 && loop_insn_first_p (p, loop->cont))
4413 not_every_iteration = 0;
4417 static void
4418 loop_bivs_find (loop)
4419 struct loop *loop;
4421 struct loop_regs *regs = LOOP_REGS (loop);
4422 struct loop_ivs *ivs = LOOP_IVS (loop);
4423 /* Temporary list pointers for traversing ivs->list. */
4424 struct iv_class *bl, **backbl;
4426 ivs->list = 0;
4428 for_each_insn_in_loop (loop, check_insn_for_bivs);
4430 /* Scan ivs->list to remove all regs that proved not to be bivs.
4431 Make a sanity check against regs->n_times_set. */
4432 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4434 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4435 /* Above happens if register modified by subreg, etc. */
4436 /* Make sure it is not recognized as a basic induction var: */
4437 || regs->array[bl->regno].n_times_set != bl->biv_count
4438 /* If never incremented, it is invariant that we decided not to
4439 move. So leave it alone. */
4440 || ! bl->incremented)
4442 if (loop_dump_stream)
4443 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4444 bl->regno,
4445 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4446 ? "not induction variable"
4447 : (! bl->incremented ? "never incremented"
4448 : "count error")));
4450 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4451 *backbl = bl->next;
4453 else
4455 backbl = &bl->next;
4457 if (loop_dump_stream)
4458 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4464 /* Determine how BIVS are initialized by looking through the pre-header
4465    extended basic block. */
4466 static void
4467 loop_bivs_init_find (loop)
4468 struct loop *loop;
4470 struct loop_ivs *ivs = LOOP_IVS (loop);
4471 /* Temporary list pointers for traversing ivs->list. */
4472 struct iv_class *bl;
4473 int call_seen;
4474 rtx p;
4476 /* Find initial value for each biv by searching backwards from loop_start,
4477 halting at first label. Also record any test condition. */
4479 call_seen = 0;
4480 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4482 rtx test;
4484 note_insn = p;
4486 if (GET_CODE (p) == CALL_INSN)
4487 call_seen = 1;
4489 if (INSN_P (p))
4490 note_stores (PATTERN (p), record_initial, ivs);
4492       /* Record any test of a biv that branches around the loop if there is
4493 	 no store between it and the start of the loop.  We only care about
4494 	 tests with constants and registers, and only certain of those. */
4495 if (GET_CODE (p) == JUMP_INSN
4496 && JUMP_LABEL (p) != 0
4497 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4498 && (test = get_condition_for_loop (loop, p)) != 0
4499 && GET_CODE (XEXP (test, 0)) == REG
4500 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4501 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4502 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4503 && bl->init_insn == 0)
4505 /* If an NE test, we have an initial value! */
4506 if (GET_CODE (test) == NE)
4508 bl->init_insn = p;
4509 bl->init_set = gen_rtx_SET (VOIDmode,
4510 XEXP (test, 0), XEXP (test, 1));
4512 else
4513 bl->initial_test = test;
4519 /* Look at each biv and see if we can say anything better about its
4520 initial value from any initializing insns set up above. (This is done
4521 in two passes to avoid missing SETs in a PARALLEL.) */
4522 static void
4523 loop_bivs_check (loop)
4524 struct loop *loop;
4526 struct loop_ivs *ivs = LOOP_IVS (loop);
4527 /* Temporary list pointers for traversing ivs->list. */
4528 struct iv_class *bl;
4529 struct iv_class **backbl;
4531 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4533 rtx src;
4534 rtx note;
4536 if (! bl->init_insn)
4537 continue;
4539       /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4540 is a constant, use the value of that. */
4541 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4542 && CONSTANT_P (XEXP (note, 0)))
4543 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4544 && CONSTANT_P (XEXP (note, 0))))
4545 src = XEXP (note, 0);
4546 else
4547 src = SET_SRC (bl->init_set);
4549 if (loop_dump_stream)
4550 fprintf (loop_dump_stream,
4551 "Biv %d: initialized at insn %d: initial value ",
4552 bl->regno, INSN_UID (bl->init_insn));
4554 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4555 || GET_MODE (src) == VOIDmode)
4556 && valid_initial_value_p (src, bl->init_insn,
4557 LOOP_INFO (loop)->pre_header_has_call,
4558 loop->start))
4560 bl->initial_value = src;
4562 if (loop_dump_stream)
4564 print_simple_rtl (loop_dump_stream, src);
4565 fputc ('\n', loop_dump_stream);
4568 /* If we can't make it a giv,
4569 let biv keep initial value of "itself". */
4570 else if (loop_dump_stream)
4571 fprintf (loop_dump_stream, "is complex\n");
4576 /* Search the loop for general induction variables. */
4578 static void
4579 loop_givs_find (loop)
4580 struct loop* loop;
4582 for_each_insn_in_loop (loop, check_insn_for_givs);
4586 /* For each giv for which we still don't know whether or not it is
4587 replaceable, check to see if it is replaceable because its final value
4588 can be calculated. */
4590 static void
4591 loop_givs_check (loop)
4592 struct loop *loop;
4594 struct loop_ivs *ivs = LOOP_IVS (loop);
4595 struct iv_class *bl;
4597 for (bl = ivs->list; bl; bl = bl->next)
4599 struct induction *v;
4601 for (v = bl->giv; v; v = v->next_iv)
4602 if (! v->replaceable && ! v->not_replaceable)
4603 check_final_value (loop, v);
4608 /* Return nonzero if it is possible to eliminate the biv BL provided
4609 all givs are reduced. This is possible if either the reg is not
4610 used outside the loop, or we can compute what its final value will
4611 be. */
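/* As a rough illustration (source-level view, not part of this function):
   in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the address giv a + i * sizeof (*a) is reduced to a pointer that is
   simply incremented each iteration; the biv I then survives only in the
   exit test, and if its final value can be computed (or it is unused after
   the loop) the test can be rewritten in terms of the reduced giv and the
   biv eliminated entirely. */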
4613 static int
4614 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4615 struct loop *loop;
4616 struct iv_class *bl;
4617 int threshold;
4618 int insn_count;
4620 /* For architectures with a decrement_and_branch_until_zero insn,
4621 don't do this if we put a REG_NONNEG note on the endtest for this
4622 biv. */
4624 #ifdef HAVE_decrement_and_branch_until_zero
4625 if (bl->nonneg)
4627 if (loop_dump_stream)
4628 fprintf (loop_dump_stream,
4629 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4630 return 0;
4632 #endif
4634   /* Check whether the biv is ever used outside the loop, or whether its
4635      final value can be computed.  Compare against bl->init_insn rather
4636      than loop->start.  We aren't concerned with any uses of the biv
4637      between init_insn and loop->start, since these won't be affected by
4638      the value of the biv elsewhere in the function, so long as init_insn
4639      doesn't use the biv itself. */
4641 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4642 && bl->init_insn
4643 && INSN_UID (bl->init_insn) < max_uid_for_loop
4644 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4645 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4646 || (bl->final_value = final_biv_value (loop, bl)))
4647 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4649 if (loop_dump_stream)
4651 fprintf (loop_dump_stream,
4652 "Cannot eliminate biv %d.\n",
4653 bl->regno);
4654 fprintf (loop_dump_stream,
4655 "First use: insn %d, last use: insn %d.\n",
4656 REGNO_FIRST_UID (bl->regno),
4657 REGNO_LAST_UID (bl->regno));
4659 return 0;
4663 /* Reduce each giv of BL that we have decided to reduce. */
4665 static void
4666 loop_givs_reduce (loop, bl)
4667 struct loop *loop;
4668 struct iv_class *bl;
4670 struct induction *v;
4672 for (v = bl->giv; v; v = v->next_iv)
4674 struct induction *tv;
4675 if (! v->ignore && v->same == 0)
4677 int auto_inc_opt = 0;
4679 /* If the code for derived givs immediately below has already
4680 allocated a new_reg, we must keep it. */
4681 if (! v->new_reg)
4682 v->new_reg = gen_reg_rtx (v->mode);
4684 #ifdef AUTO_INC_DEC
4685 /* If the target has auto-increment addressing modes, and
4686 this is an address giv, then try to put the increment
4687 immediately after its use, so that flow can create an
4688 auto-increment addressing mode. */
4689 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4690 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4691 /* We don't handle reversed biv's because bl->biv->insn
4692 does not have a valid INSN_LUID. */
4693 && ! bl->reversed
4694 && v->always_executed && ! v->maybe_multiple
4695 && INSN_UID (v->insn) < max_uid_for_loop)
4697 /* If other giv's have been combined with this one, then
4698 this will work only if all uses of the other giv's occur
4699 before this giv's insn. This is difficult to check.
4701 We simplify this by looking for the common case where
4702 there is one DEST_REG giv, and this giv's insn is the
4703 last use of the dest_reg of that DEST_REG giv. If the
4704 increment occurs after the address giv, then we can
4705 perform the optimization. (Otherwise, the increment
4706 would have to go before other_giv, and we would not be
4707 able to combine it with the address giv to get an
4708 auto-inc address.) */
4709 if (v->combined_with)
4711 struct induction *other_giv = 0;
4713 for (tv = bl->giv; tv; tv = tv->next_iv)
4714 if (tv->same == v)
4716 if (other_giv)
4717 break;
4718 else
4719 other_giv = tv;
4721 if (! tv && other_giv
4722 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4723 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4724 == INSN_UID (v->insn))
4725 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4726 auto_inc_opt = 1;
4728 /* Check for case where increment is before the address
4729 giv. Do this test in "loop order". */
4730 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4731 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4732 || (INSN_LUID (bl->biv->insn)
4733 > INSN_LUID (loop->scan_start))))
4734 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4735 && (INSN_LUID (loop->scan_start)
4736 < INSN_LUID (bl->biv->insn))))
4737 auto_inc_opt = -1;
4738 else
4739 auto_inc_opt = 1;
4741 #ifdef HAVE_cc0
4743 rtx prev;
4745 /* We can't put an insn immediately after one setting
4746 cc0, or immediately before one using cc0. */
4747 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4748 || (auto_inc_opt == -1
4749 && (prev = prev_nonnote_insn (v->insn)) != 0
4750 && INSN_P (prev)
4751 && sets_cc0_p (PATTERN (prev))))
4752 auto_inc_opt = 0;
4754 #endif
4756 if (auto_inc_opt)
4757 v->auto_inc_opt = 1;
4759 #endif
4761 /* For each place where the biv is incremented, add an insn
4762 to increment the new, reduced reg for the giv. */
4763 for (tv = bl->biv; tv; tv = tv->next_iv)
4765 rtx insert_before;
4767 if (! auto_inc_opt)
4768 insert_before = NEXT_INSN (tv->insn);
4769 else if (auto_inc_opt == 1)
4770 insert_before = NEXT_INSN (v->insn);
4771 else
4772 insert_before = v->insn;
4774 if (tv->mult_val == const1_rtx)
4775 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4776 v->new_reg, v->new_reg,
4777 0, insert_before);
4778 else /* tv->mult_val == const0_rtx */
4779 /* A multiply is acceptable here
4780 since this is presumed to be seldom executed. */
4781 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4782 v->add_val, v->new_reg,
4783 0, insert_before);
4786 /* Add code at loop start to initialize giv's reduced reg. */
4788 loop_iv_add_mult_hoist (loop,
4789 extend_value_for_giv (v, bl->initial_value),
4790 v->mult_val, v->add_val, v->new_reg);
4796 /* Check for givs whose first use is their definition and whose
4797 last use is the definition of another giv. If so, it is likely
4798 dead and should not be used to derive another giv nor to
4799 eliminate a biv. */
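/* Illustrative case (hypothetical register names): given

	t = i * 4;
	p = base + t;

   where T has no other uses, T's first use is its own definition and its
   last use is the insn that defines the giv P, so T is marked maybe_dead
   and is not used to derive further givs or to eliminate a biv. */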
4801 static void
4802 loop_givs_dead_check (loop, bl)
4803 struct loop *loop ATTRIBUTE_UNUSED;
4804 struct iv_class *bl;
4806 struct induction *v;
4808 for (v = bl->giv; v; v = v->next_iv)
4810 if (v->ignore
4811 || (v->same && v->same->ignore))
4812 continue;
4814 if (v->giv_type == DEST_REG
4815 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4817 struct induction *v1;
4819 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4820 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4821 v->maybe_dead = 1;
4827 static void
4828 loop_givs_rescan (loop, bl, reg_map)
4829 struct loop *loop;
4830 struct iv_class *bl;
4831 rtx *reg_map;
4833 struct induction *v;
4835 for (v = bl->giv; v; v = v->next_iv)
4837 if (v->same && v->same->ignore)
4838 v->ignore = 1;
4840 if (v->ignore)
4841 continue;
4843 /* Update expression if this was combined, in case other giv was
4844 replaced. */
4845 if (v->same)
4846 v->new_reg = replace_rtx (v->new_reg,
4847 v->same->dest_reg, v->same->new_reg);
4849 /* See if this register is known to be a pointer to something. If
4850 so, see if we can find the alignment. First see if there is a
4851 destination register that is a pointer. If so, this shares the
4852 alignment too. Next see if we can deduce anything from the
4853 computational information. If not, and this is a DEST_ADDR
4854 giv, at least we know that it's a pointer, though we don't know
4855 the alignment. */
4856 if (GET_CODE (v->new_reg) == REG
4857 && v->giv_type == DEST_REG
4858 && REG_POINTER (v->dest_reg))
4859 mark_reg_pointer (v->new_reg,
4860 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4861 else if (GET_CODE (v->new_reg) == REG
4862 && REG_POINTER (v->src_reg))
4864 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4866 if (align == 0
4867 || GET_CODE (v->add_val) != CONST_INT
4868 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4869 align = 0;
4871 mark_reg_pointer (v->new_reg, align);
4873 else if (GET_CODE (v->new_reg) == REG
4874 && GET_CODE (v->add_val) == REG
4875 && REG_POINTER (v->add_val))
4877 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4879 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4880 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4881 align = 0;
4883 mark_reg_pointer (v->new_reg, align);
4885 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4886 mark_reg_pointer (v->new_reg, 0);
4888 if (v->giv_type == DEST_ADDR)
4889 /* Store reduced reg as the address in the memref where we found
4890 this giv. */
4891 validate_change (v->insn, v->location, v->new_reg, 0);
4892 else if (v->replaceable)
4894 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4896 else
4898 rtx original_insn = v->insn;
4899 rtx note;
4901 /* Not replaceable; emit an insn to set the original giv reg from
4902 the reduced giv, same as above. */
4903 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4904 gen_move_insn (v->dest_reg,
4905 v->new_reg));
4907 /* The original insn may have a REG_EQUAL note. This note is
4908 now incorrect and may result in invalid substitutions later.
4909 The original insn is dead, but may be part of a libcall
4910 sequence, which doesn't seem worth the bother of handling. */
4911 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4912 if (note)
4913 remove_note (original_insn, note);
4916 /* When a loop is reversed, givs which depend on the reversed
4917 biv, and which are live outside the loop, must be set to their
4918 correct final value. This insn is only needed if the giv is
4919 not replaceable. The correct final value is the same as the
4920 value that the giv starts the reversed loop with. */
4921 if (bl->reversed && ! v->replaceable)
4922 loop_iv_add_mult_sink (loop,
4923 extend_value_for_giv (v, bl->initial_value),
4924 v->mult_val, v->add_val, v->dest_reg);
4925 else if (v->final_value)
4926 loop_insn_sink_or_swim (loop,
4927 gen_load_of_final_value (v->dest_reg,
4928 v->final_value));
4930 if (loop_dump_stream)
4932 fprintf (loop_dump_stream, "giv at %d reduced to ",
4933 INSN_UID (v->insn));
4934 print_simple_rtl (loop_dump_stream, v->new_reg);
4935 fprintf (loop_dump_stream, "\n");
4941 static int
4942 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4943 struct loop *loop ATTRIBUTE_UNUSED;
4944 struct iv_class *bl;
4945 struct induction *v;
4946 rtx test_reg;
4948 int add_cost;
4949 int benefit;
4951 benefit = v->benefit;
4952 PUT_MODE (test_reg, v->mode);
4953 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4954 test_reg, test_reg);
4956 /* Reduce benefit if not replaceable, since we will insert a
4957 move-insn to replace the insn that calculates this giv. Don't do
4958 this unless the giv is a user variable, since it will often be
4959 marked non-replaceable because of the duplication of the exit
4960 code outside the loop. In such a case, the copies we insert are
4961 dead and will be deleted. So they don't have a cost. Similar
4962 situations exist. */
4963 /* ??? The new final_[bg]iv_value code does a much better job of
4964 finding replaceable giv's, and hence this code may no longer be
4965 necessary. */
4966 if (! v->replaceable && ! bl->eliminable
4967 && REG_USERVAR_P (v->dest_reg))
4968 benefit -= copy_cost;
4970 /* Decrease the benefit to count the add-insns that we will insert
4971 to increment the reduced reg for the giv. ??? This can
4972 overestimate the run-time cost of the additional insns, e.g. if
4973 there are multiple basic blocks that increment the biv, but only
4974 one of these blocks is executed during each iteration. There is
4975 no good way to detect cases like this with the current structure
4976 of the loop optimizer. This code is more accurate for
4977 determining code size than run-time benefits. */
4978 benefit -= add_cost * bl->biv_count;
4980 /* Decide whether to strength-reduce this giv or to leave the code
4981 unchanged (recompute it from the biv each time it is used). This
4982 decision can be made independently for each giv. */
4984 #ifdef AUTO_INC_DEC
4985 /* Attempt to guess whether autoincrement will handle some of the
4986 new add insns; if so, increase BENEFIT (undo the subtraction of
4987 add_cost that was done above). */
4988 if (v->giv_type == DEST_ADDR
4989 /* Increasing the benefit is risky, since this is only a guess.
4990 Avoid increasing register pressure in cases where there would
4991 be no other benefit from reducing this giv. */
4992 && benefit > 0
4993 && GET_CODE (v->mult_val) == CONST_INT)
4995 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4997 if (HAVE_POST_INCREMENT
4998 && INTVAL (v->mult_val) == size)
4999 benefit += add_cost * bl->biv_count;
5000 else if (HAVE_PRE_INCREMENT
5001 && INTVAL (v->mult_val) == size)
5002 benefit += add_cost * bl->biv_count;
5003 else if (HAVE_POST_DECREMENT
5004 && -INTVAL (v->mult_val) == size)
5005 benefit += add_cost * bl->biv_count;
5006 else if (HAVE_PRE_DECREMENT
5007 && -INTVAL (v->mult_val) == size)
5008 benefit += add_cost * bl->biv_count;
5010 #endif
5012 return benefit;
5016 /* Free IV structures for LOOP. */
5018 static void
5019 loop_ivs_free (loop)
5020 struct loop *loop;
5022 struct loop_ivs *ivs = LOOP_IVS (loop);
5023 struct iv_class *iv = ivs->list;
5025 free (ivs->regs);
5027 while (iv)
5029 struct iv_class *next = iv->next;
5030 struct induction *induction;
5031 struct induction *next_induction;
5033 for (induction = iv->biv; induction; induction = next_induction)
5035 next_induction = induction->next_iv;
5036 free (induction);
5038 for (induction = iv->giv; induction; induction = next_induction)
5040 next_induction = induction->next_iv;
5041 free (induction);
5044 free (iv);
5045 iv = next;
5050 /* Perform strength reduction and induction variable elimination.
5052 Pseudo registers created during this function will be beyond the
5053 last valid index in several tables including
5054 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5055 problem here, because the added registers cannot be givs outside of
5056 their loop, and hence will never be reconsidered. But scan_loop
5057 must check regnos to make sure they are in bounds. */
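/* In source-level terms, the classic transformation performed here turns

	for (i = 0; i < n; i++)
	  a[i] = 0;

   where the address a + i * sizeof (*a) is recomputed with a multiply on
   every iteration, into something like

	for (p = a; p < a + n; p++)
	  *p = 0;

   so the multiply is strength-reduced to one addition per iteration, and
   the counter I can often be eliminated in favor of the pointer P.
   (Illustrative sketch only; the pass itself works on RTL.) */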
5059 static void
5060 strength_reduce (loop, flags)
5061 struct loop *loop;
5062 int flags;
5064 struct loop_info *loop_info = LOOP_INFO (loop);
5065 struct loop_regs *regs = LOOP_REGS (loop);
5066 struct loop_ivs *ivs = LOOP_IVS (loop);
5067 rtx p;
5068 /* Temporary list pointer for traversing ivs->list. */
5069 struct iv_class *bl;
5070 /* Ratio of extra register life span we can justify
5071 for saving an instruction. More if loop doesn't call subroutines
5072 since in that case saving an insn makes more difference
5073 and more registers are available. */
5074 /* ??? could set this to last value of threshold in move_movables */
5075 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5076 /* Map of pseudo-register replacements. */
5077 rtx *reg_map = NULL;
5078 int reg_map_size;
5079 int unrolled_insn_copies = 0;
5080 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5081 int insn_count = count_insns_in_loop (loop);
5083 addr_placeholder = gen_reg_rtx (Pmode);
5085 ivs->n_regs = max_reg_before_loop;
5086 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5088 /* Find all BIVs in loop. */
5089 loop_bivs_find (loop);
5091 /* Exit if there are no bivs. */
5092 if (! ivs->list)
5094       /* We can still unroll the loop anyway, but indicate that there is no
5095 	 strength reduction info available. */
5096 if (flags & LOOP_UNROLL)
5097 unroll_loop (loop, insn_count, 0);
5099 loop_ivs_free (loop);
5100 return;
5103   /* Determine how BIVS are initialized by looking through the pre-header
5104      extended basic block. */
5105 loop_bivs_init_find (loop);
5107   /* Look at each biv and see if we can say anything better about its
5108 initial value from any initializing insns set up above. */
5109 loop_bivs_check (loop);
5111 /* Search the loop for general induction variables. */
5112 loop_givs_find (loop);
5114 /* Try to calculate and save the number of loop iterations. This is
5115 set to zero if the actual number can not be calculated. This must
5116 be called after all giv's have been identified, since otherwise it may
5117 fail if the iteration variable is a giv. */
5118 loop_iterations (loop);
5120 #ifdef HAVE_prefetch
5121 if (flags & LOOP_PREFETCH)
5122 emit_prefetch_instructions (loop);
5123 #endif
5125 /* Now for each giv for which we still don't know whether or not it is
5126 replaceable, check to see if it is replaceable because its final value
5127 can be calculated. This must be done after loop_iterations is called,
5128 so that final_giv_value will work correctly. */
5129 loop_givs_check (loop);
5131 /* Try to prove that the loop counter variable (if any) is always
5132 nonnegative; if so, record that fact with a REG_NONNEG note
5133 so that "decrement and branch until zero" insn can be used. */
5134 check_dbra_loop (loop, insn_count);
5136 /* Create reg_map to hold substitutions for replaceable giv regs.
5137 Some givs might have been made from biv increments, so look at
5138 ivs->reg_iv_type for a suitable size. */
5139 reg_map_size = ivs->n_regs;
5140 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5142 /* Examine each iv class for feasibility of strength reduction/induction
5143 variable elimination. */
5145 for (bl = ivs->list; bl; bl = bl->next)
5147 struct induction *v;
5148 int benefit;
5150 /* Test whether it will be possible to eliminate this biv
5151 provided all givs are reduced. */
5152 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5154 /* This will be true at the end, if all givs which depend on this
5155 biv have been strength reduced.
5156 We can't (currently) eliminate the biv unless this is so. */
5157 bl->all_reduced = 1;
5159 /* Check each extension dependent giv in this class to see if its
5160 root biv is safe from wrapping in the interior mode. */
5161 check_ext_dependent_givs (bl, loop_info);
5163 /* Combine all giv's for this iv_class. */
5164 combine_givs (regs, bl);
5166 for (v = bl->giv; v; v = v->next_iv)
5168 struct induction *tv;
5170 if (v->ignore || v->same)
5171 continue;
5173 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5175 /* If an insn is not to be strength reduced, then set its ignore
5176 flag, and clear bl->all_reduced. */
5178 /* A giv that depends on a reversed biv must be reduced if it is
5179 	     used after the loop exit; otherwise, it would have the wrong
5180 	     value after the loop exit.  To keep things simple, just reduce
5181 	     all such givs whether or not we know they are used after the
5182 	     loop exit. */
5184 if (! flag_reduce_all_givs
5185 && v->lifetime * threshold * benefit < insn_count
5186 && ! bl->reversed)
5188 if (loop_dump_stream)
5189 fprintf (loop_dump_stream,
5190 "giv of insn %d not worth while, %d vs %d.\n",
5191 INSN_UID (v->insn),
5192 v->lifetime * threshold * benefit, insn_count);
5193 v->ignore = 1;
5194 bl->all_reduced = 0;
5196 else
5198 /* Check that we can increment the reduced giv without a
5199 multiply insn. If not, reject it. */
5201 for (tv = bl->biv; tv; tv = tv->next_iv)
5202 if (tv->mult_val == const1_rtx
5203 && ! product_cheap_p (tv->add_val, v->mult_val))
5205 if (loop_dump_stream)
5206 fprintf (loop_dump_stream,
5207 "giv of insn %d: would need a multiply.\n",
5208 INSN_UID (v->insn));
5209 v->ignore = 1;
5210 bl->all_reduced = 0;
5211 break;
5216 /* Check for givs whose first use is their definition and whose
5217 last use is the definition of another giv. If so, it is likely
5218 dead and should not be used to derive another giv nor to
5219 eliminate a biv. */
5220 loop_givs_dead_check (loop, bl);
5222 /* Reduce each giv that we decided to reduce. */
5223 loop_givs_reduce (loop, bl);
5225 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5226 as not reduced.
5228 For each giv register that can be reduced now: if replaceable,
5229 substitute reduced reg wherever the old giv occurs;
5230 else add new move insn "giv_reg = reduced_reg". */
5231 loop_givs_rescan (loop, bl, reg_map);
5233 /* All the givs based on the biv bl have been reduced if they
5234 merit it. */
5236 /* For each giv not marked as maybe dead that has been combined with a
5237 second giv, clear any "maybe dead" mark on that second giv.
5238 v->new_reg will either be or refer to the register of the giv it
5239 combined with.
5241 Doing this clearing avoids problems in biv elimination where
5242 a giv's new_reg is a complex value that can't be put in the
5243 insn but the giv combined with (with a reg as new_reg) is
5244 marked maybe_dead. Since the register will be used in either
5245 case, we'd prefer it be used from the simpler giv. */
5247 for (v = bl->giv; v; v = v->next_iv)
5248 if (! v->maybe_dead && v->same)
5249 v->same->maybe_dead = 0;
5251 /* Try to eliminate the biv, if it is a candidate.
5252 This won't work if ! bl->all_reduced,
5253 since the givs we planned to use might not have been reduced.
5255 We have to be careful that we didn't initially think we could
5256 eliminate this biv because of a giv that we now think may be
5257 dead and shouldn't be used as a biv replacement.
5259 Also, there is the possibility that we may have a giv that looks
5260 like it can be used to eliminate a biv, but the resulting insn
5261 isn't valid. This can happen, for example, on the 88k, where a
5262 JUMP_INSN can compare a register only with zero. Attempts to
5263 replace it with a compare with a constant will fail.
5265 Note that in cases where this call fails, we may have replaced some
5266 of the occurrences of the biv with a giv, but no harm was done in
5267 doing so in the rare cases where it can occur. */
5269 if (bl->all_reduced == 1 && bl->eliminable
5270 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5272 /* ?? If we created a new test to bypass the loop entirely,
5273 or otherwise drop straight in, based on this test, then
5274 we might want to rewrite it also. This way some later
5275 pass has more hope of removing the initialization of this
5276 biv entirely. */
5278 /* If final_value != 0, then the biv may be used after loop end
5279 and we must emit an insn to set it just in case.
5281 Reversed bivs already have an insn after the loop setting their
5282 value, so we don't need another one. We can't calculate the
5283 	 proper final value for such a biv here anyway. */
5284 if (bl->final_value && ! bl->reversed)
5285 loop_insn_sink_or_swim (loop,
5286 gen_load_of_final_value (bl->biv->dest_reg,
5287 bl->final_value));
5289 if (loop_dump_stream)
5290 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5291 bl->regno);
5293 /* See above note wrt final_value. But since we couldn't eliminate
5294 the biv, we must set the value after the loop instead of before. */
5295 else if (bl->final_value && ! bl->reversed)
5296 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5297 bl->final_value));
5300 /* Go through all the instructions in the loop, making all the
5301 register substitutions scheduled in REG_MAP. */
5303 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5304 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5305 || GET_CODE (p) == CALL_INSN)
5307 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5308 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5309 INSN_CODE (p) = -1;
5312 if (loop_info->n_iterations > 0)
5314 /* When we completely unroll a loop we will likely not need the increment
5315 of the loop BIV and we will not need the conditional branch at the
5316 end of the loop. */
5317 unrolled_insn_copies = insn_count - 2;
5319 #ifdef HAVE_cc0
5320 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5321 need the comparison before the conditional branch at the end of the
5322 loop. */
5323 unrolled_insn_copies -= 1;
5324 #endif
5326 /* We'll need one copy for each loop iteration. */
5327 unrolled_insn_copies *= loop_info->n_iterations;
5329 /* A little slop to account for the ability to remove initialization
5330 code, better CSE, and other secondary benefits of completely
5331 unrolling some loops. */
5332 unrolled_insn_copies -= 1;
5334 /* Clamp the value. */
5335 if (unrolled_insn_copies < 0)
5336 unrolled_insn_copies = 0;
5339 /* Unroll loops from within strength reduction so that we can use the
5340 induction variable information that strength_reduce has already
5341 collected. Always unroll loops that would be as small or smaller
5342 unrolled than when rolled. */
5343 if ((flags & LOOP_UNROLL)
5344 || ((flags & LOOP_AUTO_UNROLL)
5345 && loop_info->n_iterations > 0
5346 && unrolled_insn_copies <= insn_count))
5347 unroll_loop (loop, insn_count, 1);
5349 #ifdef HAVE_doloop_end
5350 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5351 doloop_optimize (loop);
5352 #endif /* HAVE_doloop_end */
5354   /* In case the number of iterations is known, attach a branch prediction
5355      note to the branch.  Do that only in the second loop pass, as loop
5356      unrolling may change the number of iterations performed. */
5357 if (flags & LOOP_BCT)
5359 unsigned HOST_WIDE_INT n
5360 = loop_info->n_iterations / loop_info->unroll_number;
5361 if (n > 1)
5362 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
5363 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5366 if (loop_dump_stream)
5367 fprintf (loop_dump_stream, "\n");
5369 loop_ivs_free (loop);
5370 if (reg_map)
5371 free (reg_map);
5374 /* Record all basic induction variables calculated in the insn. */
5375 static rtx
5376 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5377 struct loop *loop;
5378 rtx p;
5379 int not_every_iteration;
5380 int maybe_multiple;
5382 struct loop_ivs *ivs = LOOP_IVS (loop);
5383 rtx set;
5384 rtx dest_reg;
5385 rtx inc_val;
5386 rtx mult_val;
5387 rtx *location;
5389 if (GET_CODE (p) == INSN
5390 && (set = single_set (p))
5391 && GET_CODE (SET_DEST (set)) == REG)
5393 dest_reg = SET_DEST (set);
5394 if (REGNO (dest_reg) < max_reg_before_loop
5395 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5396 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5398 if (basic_induction_var (loop, SET_SRC (set),
5399 GET_MODE (SET_SRC (set)),
5400 dest_reg, p, &inc_val, &mult_val,
5401 &location))
5403 /* It is a possible basic induction variable.
5404 Create and initialize an induction structure for it. */
5406 struct induction *v
5407 = (struct induction *) xmalloc (sizeof (struct induction));
5409 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5410 not_every_iteration, maybe_multiple);
5411 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5413 else if (REGNO (dest_reg) < ivs->n_regs)
5414 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5417 return p;
5420 /* Record all givs calculated in the insn.
5421 A register is a giv if: it is only set once, it is a function of a
5422 biv and a constant (or invariant), and it is not a biv. */
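/* For example (illustrative only): if I is a biv, an insn computing

	j = i * 4 + 10;

   makes J a giv with src_reg I, mult_val 4 and add_val 10, provided J is
   set nowhere else in the loop and is not itself a biv. */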
5423 static rtx
5424 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5425 struct loop *loop;
5426 rtx p;
5427 int not_every_iteration;
5428 int maybe_multiple;
5430 struct loop_regs *regs = LOOP_REGS (loop);
5432 rtx set;
5433 /* Look for a general induction variable in a register. */
5434 if (GET_CODE (p) == INSN
5435 && (set = single_set (p))
5436 && GET_CODE (SET_DEST (set)) == REG
5437 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5439 rtx src_reg;
5440 rtx dest_reg;
5441 rtx add_val;
5442 rtx mult_val;
5443 rtx ext_val;
5444 int benefit;
5445 rtx regnote = 0;
5446 rtx last_consec_insn;
5448 dest_reg = SET_DEST (set);
5449 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5450 return p;
5452 if (/* SET_SRC is a giv. */
5453 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5454 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5455 /* Equivalent expression is a giv. */
5456 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5457 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5458 &add_val, &mult_val, &ext_val, 0,
5459 &benefit, VOIDmode)))
5460 /* Don't try to handle any regs made by loop optimization.
5461 We have nothing on them in regno_first_uid, etc. */
5462 && REGNO (dest_reg) < max_reg_before_loop
5463 /* Don't recognize a BASIC_INDUCT_VAR here. */
5464 && dest_reg != src_reg
5465 /* This must be the only place where the register is set. */
5466 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5467 /* or all sets must be consecutive and make a giv. */
5468 || (benefit = consec_sets_giv (loop, benefit, p,
5469 src_reg, dest_reg,
5470 &add_val, &mult_val, &ext_val,
5471 &last_consec_insn))))
5473 struct induction *v
5474 = (struct induction *) xmalloc (sizeof (struct induction));
5476 /* If this is a library call, increase benefit. */
5477 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5478 benefit += libcall_benefit (p);
5480 /* Skip the consecutive insns, if there are any. */
5481 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5482 p = last_consec_insn;
5484 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5485 ext_val, benefit, DEST_REG, not_every_iteration,
5486 maybe_multiple, (rtx*) 0);
5491 #ifndef DONT_REDUCE_ADDR
5492 /* Look for givs which are memory addresses. */
5493 /* This resulted in worse code on a VAX 8600. I wonder if it
5494 still does. */
5495 if (GET_CODE (p) == INSN)
5496 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5497 maybe_multiple);
5498 #endif
5500 /* Update the status of whether a giv can derive other givs. This can
5501 change when we pass a label or an insn that updates a biv. */
5502 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5503 || GET_CODE (p) == CODE_LABEL)
5504 update_giv_derive (loop, p);
5505 return p;
5508 /* Return 1 if X is a valid source for an initial value (or as value being
5509 compared against in an initial test).
5511 X must be either a register or constant and must not be clobbered between
5512 the current insn and the start of the loop.
5514 INSN is the insn containing X. */
5516 static int
5517 valid_initial_value_p (x, insn, call_seen, loop_start)
5518 rtx x;
5519 rtx insn;
5520 int call_seen;
5521 rtx loop_start;
5523 if (CONSTANT_P (x))
5524 return 1;
5526 /* Only consider pseudos we know about initialized in insns whose luids
5527 we know. */
5528 if (GET_CODE (x) != REG
5529 || REGNO (x) >= max_reg_before_loop)
5530 return 0;
5532 /* Don't use call-clobbered registers across a call which clobbers it. On
5533 some machines, don't use any hard registers at all. */
5534 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5535 && (SMALL_REGISTER_CLASSES
5536 || (call_used_regs[REGNO (x)] && call_seen)))
5537 return 0;
5539 /* Don't use registers that have been clobbered before the start of the
5540 loop. */
5541 if (reg_set_between_p (x, insn, loop_start))
5542 return 0;
5544 return 1;
5547 /* Scan X for memory refs and check each memory address
5548 as a possible giv. INSN is the insn whose pattern X comes from.
5549 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5550 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5551 more than once in each loop iteration. */
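/* For illustration only (hypothetical example, not from the original
   sources): for a store such as `a[i] = 0' the address inside the MEM is
   typically of the form

       (mem (plus (reg a_base) (mult (reg i) (const_int 4))))

   and that address expression is a candidate DEST_ADDR giv, with the
   register holding `i' as the underlying biv.  */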
5553 static void
5554 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5555 const struct loop *loop;
5556 rtx x;
5557 rtx insn;
5558 int not_every_iteration, maybe_multiple;
5560 int i, j;
5561 enum rtx_code code;
5562 const char *fmt;
5564 if (x == 0)
5565 return;
5567 code = GET_CODE (x);
5568 switch (code)
5570 case REG:
5571 case CONST_INT:
5572 case CONST:
5573 case CONST_DOUBLE:
5574 case SYMBOL_REF:
5575 case LABEL_REF:
5576 case PC:
5577 case CC0:
5578 case ADDR_VEC:
5579 case ADDR_DIFF_VEC:
5580 case USE:
5581 case CLOBBER:
5582 return;
5584 case MEM:
5586 rtx src_reg;
5587 rtx add_val;
5588 rtx mult_val;
5589 rtx ext_val;
5590 int benefit;
5592 /* This code used to disable creating GIVs with mult_val == 1 and
5593 add_val == 0. However, this leads to lost optimizations when
5594 it comes time to combine a set of related DEST_ADDR GIVs, since
5595 this one would not be seen. */
5597 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5598 &mult_val, &ext_val, 1, &benefit,
5599 GET_MODE (x)))
5601 /* Found one; record it. */
5602 struct induction *v
5603 = (struct induction *) xmalloc (sizeof (struct induction));
5605 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5606 add_val, ext_val, benefit, DEST_ADDR,
5607 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5609 v->mem = x;
5612 return;
5614 default:
5615 break;
5618 /* Recursively scan the subexpressions for other mem refs. */
5620 fmt = GET_RTX_FORMAT (code);
5621 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5622 if (fmt[i] == 'e')
5623 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5624 maybe_multiple);
5625 else if (fmt[i] == 'E')
5626 for (j = 0; j < XVECLEN (x, i); j++)
5627 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5628 maybe_multiple);
5631 /* Fill in the data about one biv update.
5632 V is the `struct induction' in which we record the biv. (It is
5633 allocated by the caller, with alloca.)
5634 INSN is the insn that sets it.
5635 DEST_REG is the biv's reg.
5637 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5638 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5639 being set to INC_VAL.
5641 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5642 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5643 can be executed more than once per iteration. If MAYBE_MULTIPLE
5644 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5645 executed exactly once per iteration. */
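/* A minimal sketch of the two cases described above (hypothetical values,
   not from the original sources):

       i = i + 4;   =>  mult_val == const1_rtx, inc_val == (const_int 4)
       i = 10;      =>  mult_val == const0_rtx, inc_val == (const_int 10)

   The first is an ordinary biv increment; the second sets the biv to a
   loop-invariant value.  */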
5647 static void
5648 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5649 not_every_iteration, maybe_multiple)
5650 struct loop *loop;
5651 struct induction *v;
5652 rtx insn;
5653 rtx dest_reg;
5654 rtx inc_val;
5655 rtx mult_val;
5656 rtx *location;
5657 int not_every_iteration;
5658 int maybe_multiple;
5660 struct loop_ivs *ivs = LOOP_IVS (loop);
5661 struct iv_class *bl;
5663 v->insn = insn;
5664 v->src_reg = dest_reg;
5665 v->dest_reg = dest_reg;
5666 v->mult_val = mult_val;
5667 v->add_val = inc_val;
5668 v->ext_dependent = NULL_RTX;
5669 v->location = location;
5670 v->mode = GET_MODE (dest_reg);
5671 v->always_computable = ! not_every_iteration;
5672 v->always_executed = ! not_every_iteration;
5673 v->maybe_multiple = maybe_multiple;
5675 /* Add this to the reg's iv_class, creating a class
5676 if this is the first incrementation of the reg. */
5678 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5679 if (bl == 0)
5681 /* Create and initialize new iv_class. */
5683 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5685 bl->regno = REGNO (dest_reg);
5686 bl->biv = 0;
5687 bl->giv = 0;
5688 bl->biv_count = 0;
5689 bl->giv_count = 0;
5691 /* Set initial value to the reg itself. */
5692 bl->initial_value = dest_reg;
5693 bl->final_value = 0;
5694 /* We haven't seen the initializing insn yet. */
5695 bl->init_insn = 0;
5696 bl->init_set = 0;
5697 bl->initial_test = 0;
5698 bl->incremented = 0;
5699 bl->eliminable = 0;
5700 bl->nonneg = 0;
5701 bl->reversed = 0;
5702 bl->total_benefit = 0;
5704 /* Add this class to ivs->list. */
5705 bl->next = ivs->list;
5706 ivs->list = bl;
5708 /* Put it in the array of biv register classes. */
5709 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5712 /* Update IV_CLASS entry for this biv. */
5713 v->next_iv = bl->biv;
5714 bl->biv = v;
5715 bl->biv_count++;
5716 if (mult_val == const1_rtx)
5717 bl->incremented = 1;
5719 if (loop_dump_stream)
5720 loop_biv_dump (v, loop_dump_stream, 0);
5723 /* Fill in the data about one giv.
5724 V is the `struct induction' in which we record the giv. (It is
5725 allocated by the caller, with alloca.)
5726 INSN is the insn that sets it.
5727 BENEFIT estimates the savings from deleting this insn.
5728 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5729 into a register or is used as a memory address.
5731 SRC_REG is the biv reg which the giv is computed from.
5732 DEST_REG is the giv's reg (if the giv is stored in a reg).
5733 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5734 LOCATION points to the place where this giv's value appears in INSN. */
5736 static void
5737 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5738 benefit, type, not_every_iteration, maybe_multiple, location)
5739 const struct loop *loop;
5740 struct induction *v;
5741 rtx insn;
5742 rtx src_reg;
5743 rtx dest_reg;
5744 rtx mult_val, add_val, ext_val;
5745 int benefit;
5746 enum g_types type;
5747 int not_every_iteration, maybe_multiple;
5748 rtx *location;
5750 struct loop_ivs *ivs = LOOP_IVS (loop);
5751 struct induction *b;
5752 struct iv_class *bl;
5753 rtx set = single_set (insn);
5754 rtx temp;
5756 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5757 undo the MULT canonicalization that we performed earlier. */
5758 temp = simplify_rtx (add_val);
5759 if (temp
5760 && ! (GET_CODE (add_val) == MULT
5761 && GET_CODE (temp) == ASHIFT))
5762 add_val = temp;
5764 v->insn = insn;
5765 v->src_reg = src_reg;
5766 v->giv_type = type;
5767 v->dest_reg = dest_reg;
5768 v->mult_val = mult_val;
5769 v->add_val = add_val;
5770 v->ext_dependent = ext_val;
5771 v->benefit = benefit;
5772 v->location = location;
5773 v->cant_derive = 0;
5774 v->combined_with = 0;
5775 v->maybe_multiple = maybe_multiple;
5776 v->maybe_dead = 0;
5777 v->derive_adjustment = 0;
5778 v->same = 0;
5779 v->ignore = 0;
5780 v->new_reg = 0;
5781 v->final_value = 0;
5782 v->same_insn = 0;
5783 v->auto_inc_opt = 0;
5784 v->unrolled = 0;
5785 v->shared = 0;
5787 /* The v->always_computable field is used in update_giv_derive, to
5788 determine whether a giv can be used to derive another giv. For a
5789 DEST_REG giv, INSN computes a new value for the giv, so its value
5790 isn't computable if INSN isn't executed every iteration.
5791 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5792 it does not compute a new value. Hence the value is always computable
5793 regardless of whether INSN is executed each iteration. */
5795 if (type == DEST_ADDR)
5796 v->always_computable = 1;
5797 else
5798 v->always_computable = ! not_every_iteration;
5800 v->always_executed = ! not_every_iteration;
5802 if (type == DEST_ADDR)
5804 v->mode = GET_MODE (*location);
5805 v->lifetime = 1;
5807 else /* type == DEST_REG */
5809 v->mode = GET_MODE (SET_DEST (set));
5811 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5813 /* If the lifetime is zero, it means that this register is
5814 really a dead store. So mark this as a giv that can be
5815 ignored. This will not prevent the biv from being eliminated. */
5816 if (v->lifetime == 0)
5817 v->ignore = 1;
5819 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5820 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5823 /* Add the giv to the class of givs computed from one biv. */
5825 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5826 if (bl)
5828 v->next_iv = bl->giv;
5829 bl->giv = v;
5830 /* Don't count DEST_ADDR. This is supposed to count the number of
5831 insns that calculate givs. */
5832 if (type == DEST_REG)
5833 bl->giv_count++;
5834 bl->total_benefit += benefit;
5836 else
5837 /* Fatal error, biv missing for this giv? */
5838 abort ();
5840 if (type == DEST_ADDR)
5842 v->replaceable = 1;
5843 v->not_replaceable = 0;
5845 else
5847 /* The giv can be replaced outright by the reduced register only if all
5848 of the following conditions are true:
5849 - the insn that sets the giv is always executed on any iteration
5850 on which the giv is used at all
5851 (there are two ways to deduce this:
5852 either the insn is executed on every iteration,
5853 or all uses follow that insn in the same basic block),
5854 - the giv is not used outside the loop
5855 - no assignments to the biv occur during the giv's lifetime. */
5857 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5858 /* Previous line always fails if INSN was moved by loop opt. */
5859 && REGNO_LAST_LUID (REGNO (dest_reg))
5860 < INSN_LUID (loop->end)
5861 && (! not_every_iteration
5862 || last_use_this_basic_block (dest_reg, insn)))
5864 /* Now check that there are no assignments to the biv within the
5865 giv's lifetime. This requires two separate checks. */
5867 /* Check each biv update, and fail if any are between the first
5868 and last use of the giv.
5870 If this loop contains an inner loop that was unrolled, then
5871 the insn modifying the biv may have been emitted by the loop
5872 unrolling code, and hence does not have a valid luid. Just
5873 mark the biv as not replaceable in this case. It is not very
5874 useful as a biv, because it is used in two different loops.
5875 It is very unlikely that we would be able to optimize the giv
5876 using this biv anyways. */
5878 v->replaceable = 1;
5879 v->not_replaceable = 0;
5880 for (b = bl->biv; b; b = b->next_iv)
5882 if (INSN_UID (b->insn) >= max_uid_for_loop
5883 || ((INSN_LUID (b->insn)
5884 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5885 && (INSN_LUID (b->insn)
5886 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5888 v->replaceable = 0;
5889 v->not_replaceable = 1;
5890 break;
5894 /* If there are any backwards branches that go from after the
5895 biv update to before it, then this giv is not replaceable. */
5896 if (v->replaceable)
5897 for (b = bl->biv; b; b = b->next_iv)
5898 if (back_branch_in_range_p (loop, b->insn))
5900 v->replaceable = 0;
5901 v->not_replaceable = 1;
5902 break;
5905 else
5907 /* May still be replaceable, we don't have enough info here to
5908 decide. */
5909 v->replaceable = 0;
5910 v->not_replaceable = 0;
5914 /* Record whether the add_val contains a const_int, for later use by
5915 combine_givs. */
5917 rtx tem = add_val;
5919 v->no_const_addval = 1;
5920 if (tem == const0_rtx)
5922 else if (CONSTANT_P (add_val))
5923 v->no_const_addval = 0;
5924 if (GET_CODE (tem) == PLUS)
5926 while (1)
5928 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5929 tem = XEXP (tem, 0);
5930 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5931 tem = XEXP (tem, 1);
5932 else
5933 break;
5935 if (CONSTANT_P (XEXP (tem, 1)))
5936 v->no_const_addval = 0;
5940 if (loop_dump_stream)
5941 loop_giv_dump (v, loop_dump_stream, 0);
5944 /* All this does is determine whether a giv can be made replaceable because
5945 its final value can be calculated. This code can not be part of record_giv
5946 above, because final_giv_value requires that the number of loop iterations
5947 be known, and that can not be accurately calculated until after all givs
5948 have been identified. */
5950 static void
5951 check_final_value (loop, v)
5952 const struct loop *loop;
5953 struct induction *v;
5955 rtx final_value = 0;
5957 /* DEST_ADDR givs will never reach here, because they are always marked
5958 replaceable above in record_giv. */
5960 /* The giv can be replaced outright by the reduced register only if all
5961 of the following conditions are true:
5962 - the insn that sets the giv is always executed on any iteration
5963 on which the giv is used at all
5964 (there are two ways to deduce this:
5965 either the insn is executed on every iteration,
5966 or all uses follow that insn in the same basic block),
5967 - its final value can be calculated (this condition is different
5968 than the one above in record_giv)
5969 - it's not used before it's set
5970 - no assignments to the biv occur during the giv's lifetime. */
5972 #if 0
5973 /* This is only called now when replaceable is known to be false. */
5974 /* Clear replaceable, so that it won't confuse final_giv_value. */
5975 v->replaceable = 0;
5976 #endif
5978 if ((final_value = final_giv_value (loop, v))
5979 && (v->always_executed
5980 || last_use_this_basic_block (v->dest_reg, v->insn)))
5982 int biv_increment_seen = 0, before_giv_insn = 0;
5983 rtx p = v->insn;
5984 rtx last_giv_use;
5986 v->replaceable = 1;
5987 v->not_replaceable = 0;
5989 /* When trying to determine whether or not a biv increment occurs
5990 during the lifetime of the giv, we can ignore uses of the variable
5991 outside the loop because final_value is true. Hence we can not
5992 use regno_last_uid and regno_first_uid as above in record_giv. */
5994 /* Search the loop to determine whether any assignments to the
5995 biv occur during the giv's lifetime. Start with the insn
5996 that sets the giv, and search around the loop until we come
5997 back to that insn again.
5999 Also fail if there is a jump within the giv's lifetime that jumps
6000 to somewhere outside the lifetime but still within the loop. This
6001 catches spaghetti code where the execution order is not linear, and
6002 hence the above test fails. Here we assume that the giv lifetime
6003 does not extend from one iteration of the loop to the next, so as
6004 to make the test easier. Since the lifetime isn't known yet,
6005 this requires two loops. See also record_giv above. */
6007 last_giv_use = v->insn;
6009 while (1)
6011 p = NEXT_INSN (p);
6012 if (p == loop->end)
6014 before_giv_insn = 1;
6015 p = NEXT_INSN (loop->start);
6017 if (p == v->insn)
6018 break;
6020 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
6021 || GET_CODE (p) == CALL_INSN)
6023 /* It is possible for the BIV increment to use the GIV if we
6024 have a cycle. Thus we must be sure to check each insn for
6025 both BIV and GIV uses, and we must check for BIV uses
6026 first. */
6028 if (! biv_increment_seen
6029 && reg_set_p (v->src_reg, PATTERN (p)))
6030 biv_increment_seen = 1;
6032 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6034 if (biv_increment_seen || before_giv_insn)
6036 v->replaceable = 0;
6037 v->not_replaceable = 1;
6038 break;
6040 last_giv_use = p;
6045 /* Now that the lifetime of the giv is known, check for branches
6046 from within the lifetime to outside the lifetime if it is still
6047 replaceable. */
6049 if (v->replaceable)
6051 p = v->insn;
6052 while (1)
6054 p = NEXT_INSN (p);
6055 if (p == loop->end)
6056 p = NEXT_INSN (loop->start);
6057 if (p == last_giv_use)
6058 break;
6060 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6061 && LABEL_NAME (JUMP_LABEL (p))
6062 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6063 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6064 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6065 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6067 v->replaceable = 0;
6068 v->not_replaceable = 1;
6070 if (loop_dump_stream)
6071 fprintf (loop_dump_stream,
6072 "Found branch outside giv lifetime.\n");
6074 break;
6079 /* If it is replaceable, then save the final value. */
6080 if (v->replaceable)
6081 v->final_value = final_value;
6084 if (loop_dump_stream && v->replaceable)
6085 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6086 INSN_UID (v->insn), REGNO (v->dest_reg));
6089 /* Update the status of whether a giv can derive other givs.
6091 We need to do something special if there is or may be an update to the biv
6092 between the time the giv is defined and the time it is used to derive
6093 another giv.
6095 In addition, a giv that is only conditionally set is not allowed to
6096 derive another giv once a label has been passed.
6098 The cases we look at are when a label or an update to a biv is passed. */
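/* A worked example of the compensation described above (hypothetical
   values): suppose giv G is recorded as G = B * 3 + 7, and the biv B is
   later incremented by 2 before G is used to derive another giv.  The
   derive_adjustment then accumulates 3 * 2 = 6, so a derived giv can be
   expressed in terms of the current value of B as G = B * 3 + 7 - 6.  */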
6100 static void
6101 update_giv_derive (loop, p)
6102 const struct loop *loop;
6103 rtx p;
6105 struct loop_ivs *ivs = LOOP_IVS (loop);
6106 struct iv_class *bl;
6107 struct induction *biv, *giv;
6108 rtx tem;
6109 int dummy;
6111 /* Search all IV classes, then all bivs, and finally all givs.
6113 There are three cases we are concerned with. First we have the situation
6114 of a giv that is only updated conditionally. In that case, it may not
6115 derive any givs after a label is passed.
6117 The second case is when a biv update occurs, or may occur, after the
6118 definition of a giv. For certain biv updates (see below) that are
6119 known to occur between the giv definition and use, we can adjust the
6120 giv definition. For others, or when the biv update is conditional,
6121 we must prevent the giv from deriving any other givs. There are two
6122 sub-cases within this case.
6124 If this is a label, we are concerned with any biv update that is done
6125 conditionally, since it may be done after the giv is defined followed by
6126 a branch here (actually, we need to pass both a jump and a label, but
6127 this extra tracking doesn't seem worth it).
6129 If this is a jump, we are concerned about any biv update that may be
6130 executed multiple times. We are actually only concerned about
6131 backward jumps, but it is probably not worth performing the test
6132 on the jump again here.
6134 If this is a biv update, we must adjust the giv status to show that a
6135 subsequent biv update was performed. If this adjustment cannot be done,
6136 the giv cannot derive further givs. */
6138 for (bl = ivs->list; bl; bl = bl->next)
6139 for (biv = bl->biv; biv; biv = biv->next_iv)
6140 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6141 || biv->insn == p)
6143 for (giv = bl->giv; giv; giv = giv->next_iv)
6145 /* If cant_derive is already true, there is no point in
6146 checking all of these conditions again. */
6147 if (giv->cant_derive)
6148 continue;
6150 /* If this giv is conditionally set and we have passed a label,
6151 it cannot derive anything. */
6152 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6153 giv->cant_derive = 1;
6155 /* Skip givs that have mult_val == 0, since
6156 they are really invariants. Also skip those that are
6157 replaceable, since we know their lifetime doesn't contain
6158 any biv update. */
6159 else if (giv->mult_val == const0_rtx || giv->replaceable)
6160 continue;
6162 /* The only way we can allow this giv to derive another
6163 is if this is a biv increment and we can form the product
6164 of biv->add_val and giv->mult_val. In this case, we will
6165 be able to compute a compensation. */
6166 else if (biv->insn == p)
6168 rtx ext_val_dummy;
6170 tem = 0;
6171 if (biv->mult_val == const1_rtx)
6172 tem = simplify_giv_expr (loop,
6173 gen_rtx_MULT (giv->mode,
6174 biv->add_val,
6175 giv->mult_val),
6176 &ext_val_dummy, &dummy);
6178 if (tem && giv->derive_adjustment)
6179 tem = simplify_giv_expr
6180 (loop,
6181 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6182 &ext_val_dummy, &dummy);
6184 if (tem)
6185 giv->derive_adjustment = tem;
6186 else
6187 giv->cant_derive = 1;
6189 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6190 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6191 giv->cant_derive = 1;
6196 /* Check whether an insn is an increment legitimate for a basic induction var.
6197 X is the source of insn P, or a part of it.
6198 MODE is the mode in which X should be interpreted.
6200 DEST_REG is the putative biv, also the destination of the insn.
6201 We accept patterns of these forms:
6202 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6203 REG = INVARIANT + REG
6205 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6206 store the additive term into *INC_VAL, and store the place where
6207 we found the additive term into *LOCATION.
6209 If X is an assignment of an invariant into DEST_REG, we set
6210 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6212 We also want to detect a BIV when it corresponds to a variable
6213 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6214 of the variable may be a PLUS that adds a SUBREG of that variable to
6215 an invariant and then sign- or zero-extends the result of the PLUS
6216 into the variable.
6218 Most GIVs in such cases will be in the promoted mode, since that is
6219 probably the natural computation mode (and almost certainly the mode
6220 used for addresses) on the machine. So we view the pseudo-reg containing
6221 the variable as the BIV, as if it were simply incremented.
6223 Note that treating the entire pseudo as a BIV will result in making
6224 simple increments to any GIVs based on it. However, if the variable
6225 overflows in its declared mode but not its promoted mode, the result will
6226 be incorrect. This is acceptable if the variable is signed, since
6227 overflows in such cases are undefined, but not if it is unsigned, since
6228 those overflows are defined. So we only check for SIGN_EXTEND and
6229 not ZERO_EXTEND.
6231 If we cannot find a biv, we return 0. */
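/* Illustrative RTL patterns (hypothetical, for exposition only):

       (set (reg i) (plus (reg i) (const_int 4)))
           => return 1, *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4)

       (set (reg i) (const_int 0))      [innermost loop only]
           => return 1, *MULT_VAL = const0_rtx, *INC_VAL = (const_int 0)

   In the first form *LOCATION is set to the address of the slot holding
   the (const_int 4) operand, so the increment can later be adjusted in
   place.  */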
6233 static int
6234 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6235 const struct loop *loop;
6236 rtx x;
6237 enum machine_mode mode;
6238 rtx dest_reg;
6239 rtx p;
6240 rtx *inc_val;
6241 rtx *mult_val;
6242 rtx **location;
6244 enum rtx_code code;
6245 rtx *argp, arg;
6246 rtx insn, set = 0;
6248 code = GET_CODE (x);
6249 *location = NULL;
6250 switch (code)
6252 case PLUS:
6253 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6254 || (GET_CODE (XEXP (x, 0)) == SUBREG
6255 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6256 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6258 argp = &XEXP (x, 1);
6260 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6261 || (GET_CODE (XEXP (x, 1)) == SUBREG
6262 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6263 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6265 argp = &XEXP (x, 0);
6267 else
6268 return 0;
6270 arg = *argp;
6271 if (loop_invariant_p (loop, arg) != 1)
6272 return 0;
6274 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6275 *mult_val = const1_rtx;
6276 *location = argp;
6277 return 1;
6279 case SUBREG:
6280 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6281 handle addition of promoted variables.
6282 ??? The comment at the start of this function is wrong: promoted
6283 variable increments don't look like it says they do. */
6284 return basic_induction_var (loop, SUBREG_REG (x),
6285 GET_MODE (SUBREG_REG (x)),
6286 dest_reg, p, inc_val, mult_val, location);
6288 case REG:
6289 /* If this register is assigned in a previous insn, look at its
6290 source, but don't go outside the loop or past a label. */
6292 /* If this sets a register to itself, we would repeat any previous
6293 biv increment if we applied this strategy blindly. */
6294 if (rtx_equal_p (dest_reg, x))
6295 return 0;
6297 insn = p;
6298 while (1)
6300 rtx dest;
6303 insn = PREV_INSN (insn);
6305 while (insn && GET_CODE (insn) == NOTE
6306 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6308 if (!insn)
6309 break;
6310 set = single_set (insn);
6311 if (set == 0)
6312 break;
6313 dest = SET_DEST (set);
6314 if (dest == x
6315 || (GET_CODE (dest) == SUBREG
6316 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6317 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6318 && SUBREG_REG (dest) == x))
6319 return basic_induction_var (loop, SET_SRC (set),
6320 (GET_MODE (SET_SRC (set)) == VOIDmode
6321 ? GET_MODE (x)
6322 : GET_MODE (SET_SRC (set))),
6323 dest_reg, insn,
6324 inc_val, mult_val, location);
6326 while (GET_CODE (dest) == SIGN_EXTRACT
6327 || GET_CODE (dest) == ZERO_EXTRACT
6328 || GET_CODE (dest) == SUBREG
6329 || GET_CODE (dest) == STRICT_LOW_PART)
6330 dest = XEXP (dest, 0);
6331 if (dest == x)
6332 break;
6334 /* Fall through. */
6336 /* Can accept constant setting of biv only when inside the innermost loop.
6337 Otherwise, a biv of an inner loop may be incorrectly recognized
6338 as a biv of the outer loop,
6339 causing code to be moved INTO the inner loop. */
6340 case MEM:
6341 if (loop_invariant_p (loop, x) != 1)
6342 return 0;
6343 case CONST_INT:
6344 case SYMBOL_REF:
6345 case CONST:
6346 /* convert_modes aborts if we try to convert to or from CCmode, so just
6347 exclude that case. It is very unlikely that a condition code value
6348 would be a useful iterator anyways. convert_modes aborts if we try to
6349 convert a float mode to non-float or vice versa too. */
6350 if (loop->level == 1
6351 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6352 && GET_MODE_CLASS (mode) != MODE_CC)
6354 /* Possible bug here? Perhaps we don't know the mode of X. */
6355 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6356 *mult_val = const0_rtx;
6357 return 1;
6359 else
6360 return 0;
6362 case SIGN_EXTEND:
6363 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6364 dest_reg, p, inc_val, mult_val, location);
6366 case ASHIFTRT:
6367 /* Similar, since this can be a sign extension. */
6368 for (insn = PREV_INSN (p);
6369 (insn && GET_CODE (insn) == NOTE
6370 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6371 insn = PREV_INSN (insn))
6374 if (insn)
6375 set = single_set (insn);
6377 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6378 && set && SET_DEST (set) == XEXP (x, 0)
6379 && GET_CODE (XEXP (x, 1)) == CONST_INT
6380 && INTVAL (XEXP (x, 1)) >= 0
6381 && GET_CODE (SET_SRC (set)) == ASHIFT
6382 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6383 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6384 GET_MODE (XEXP (x, 0)),
6385 dest_reg, insn, inc_val, mult_val,
6386 location);
6387 return 0;
6389 default:
6390 return 0;
6394 /* A general induction variable (giv) is any quantity that is a linear
6395 function of a basic induction variable,
6396 i.e. giv = biv * mult_val + add_val.
6397 The coefficients can be any loop invariant quantity.
6398 A giv need not be computed directly from the biv;
6399 it can be computed by way of other givs. */
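/* A hypothetical worked example (not from the original sources): with a
   biv `i' stepping 0, 1, 2, ... the address computation `p + 4*i + 8'
   is a giv with mult_val == (const_int 4) and add_val == (plus (reg p)
   (const_int 8)); it advances by 4 every time `i' advances by 1.  */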
6401 /* Determine whether X computes a giv.
6402 If it does, return a nonzero value
6403 which is the benefit from eliminating the computation of X;
6404 set *SRC_REG to the register of the biv that it is computed from;
6405 set *ADD_VAL and *MULT_VAL to the coefficients,
6406 such that the value of X is biv * mult + add; */
6408 static int
6409 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6410 is_addr, pbenefit, addr_mode)
6411 const struct loop *loop;
6412 rtx x;
6413 rtx *src_reg;
6414 rtx *add_val;
6415 rtx *mult_val;
6416 rtx *ext_val;
6417 int is_addr;
6418 int *pbenefit;
6419 enum machine_mode addr_mode;
6421 struct loop_ivs *ivs = LOOP_IVS (loop);
6422 rtx orig_x = x;
6424 /* If this is an invariant, forget it, it isn't a giv. */
6425 if (loop_invariant_p (loop, x) == 1)
6426 return 0;
6428 *pbenefit = 0;
6429 *ext_val = NULL_RTX;
6430 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6431 if (x == 0)
6432 return 0;
6434 switch (GET_CODE (x))
6436 case USE:
6437 case CONST_INT:
6438 /* Since this is now an invariant and wasn't before, it must be a giv
6439 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6440 with. */
6441 *src_reg = ivs->list->biv->dest_reg;
6442 *mult_val = const0_rtx;
6443 *add_val = x;
6444 break;
6446 case REG:
6447 /* This is equivalent to a BIV. */
6448 *src_reg = x;
6449 *mult_val = const1_rtx;
6450 *add_val = const0_rtx;
6451 break;
6453 case PLUS:
6454 /* Either (plus (biv) (invar)) or
6455 (plus (mult (biv) (invar_1)) (invar_2)). */
6456 if (GET_CODE (XEXP (x, 0)) == MULT)
6458 *src_reg = XEXP (XEXP (x, 0), 0);
6459 *mult_val = XEXP (XEXP (x, 0), 1);
6461 else
6463 *src_reg = XEXP (x, 0);
6464 *mult_val = const1_rtx;
6466 *add_val = XEXP (x, 1);
6467 break;
6469 case MULT:
6470 /* ADD_VAL is zero. */
6471 *src_reg = XEXP (x, 0);
6472 *mult_val = XEXP (x, 1);
6473 *add_val = const0_rtx;
6474 break;
6476 default:
6477 abort ();
6480 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6481 unless they are CONST_INT). */
6482 if (GET_CODE (*add_val) == USE)
6483 *add_val = XEXP (*add_val, 0);
6484 if (GET_CODE (*mult_val) == USE)
6485 *mult_val = XEXP (*mult_val, 0);
6487 if (is_addr)
6488 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6489 else
6490 *pbenefit += rtx_cost (orig_x, SET);
6492 /* Always return true if this is a giv so it will be detected as such,
6493 even if the benefit is zero or negative. This allows elimination
6494 of bivs that might otherwise not be eliminated. */
6495 return 1;
6498 /* Given an expression, X, try to form it as a linear function of a biv.
6499 We will canonicalize it to be of the form
6500 (plus (mult (BIV) (invar_1))
6501 (invar_2))
6502 with possible degeneracies.
6504 The invariant expressions must each be of a form that can be used as a
6505 machine operand. We surround them with a USE rtx (a hack, but localized
6506 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6507 routine; it is the caller's responsibility to strip them.
6509 If no such canonicalization is possible (i.e., two biv's are used or an
6510 expression that is neither invariant nor a biv or giv), this routine
6511 returns 0.
6513 For a nonzero return, the result will have a code of CONST_INT, USE,
6514 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6516 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
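/* A small example of the canonical form (hypothetical values): the
   expression `(i - 3) * 4', with `i' a biv, would be rewritten as

       (plus (mult (reg i) (const_int 4)) (const_int -12))

   i.e. MINUS becomes PLUS of a negated term, the multiplication is
   distributed, and the invariant part is folded into a single constant.  */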
6518 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6519 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6521 static rtx
6522 simplify_giv_expr (loop, x, ext_val, benefit)
6523 const struct loop *loop;
6524 rtx x;
6525 rtx *ext_val;
6526 int *benefit;
6528 struct loop_ivs *ivs = LOOP_IVS (loop);
6529 struct loop_regs *regs = LOOP_REGS (loop);
6530 enum machine_mode mode = GET_MODE (x);
6531 rtx arg0, arg1;
6532 rtx tem;
6534 /* If this is not an integer mode, or if we cannot do arithmetic in this
6535 mode, this can't be a giv. */
6536 if (mode != VOIDmode
6537 && (GET_MODE_CLASS (mode) != MODE_INT
6538 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6539 return NULL_RTX;
6541 switch (GET_CODE (x))
6543 case PLUS:
6544 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6545 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6546 if (arg0 == 0 || arg1 == 0)
6547 return NULL_RTX;
6549 /* Put constant last, CONST_INT last if both constant. */
6550 if ((GET_CODE (arg0) == USE
6551 || GET_CODE (arg0) == CONST_INT)
6552 && ! ((GET_CODE (arg0) == USE
6553 && GET_CODE (arg1) == USE)
6554 || GET_CODE (arg1) == CONST_INT))
6555 tem = arg0, arg0 = arg1, arg1 = tem;
6557 /* Handle addition of zero, then addition of an invariant. */
6558 if (arg1 == const0_rtx)
6559 return arg0;
6560 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6561 switch (GET_CODE (arg0))
6563 case CONST_INT:
6564 case USE:
6565 /* Adding two invariants must result in an invariant, so enclose
6566 addition operation inside a USE and return it. */
6567 if (GET_CODE (arg0) == USE)
6568 arg0 = XEXP (arg0, 0);
6569 if (GET_CODE (arg1) == USE)
6570 arg1 = XEXP (arg1, 0);
6572 if (GET_CODE (arg0) == CONST_INT)
6573 tem = arg0, arg0 = arg1, arg1 = tem;
6574 if (GET_CODE (arg1) == CONST_INT)
6575 tem = sge_plus_constant (arg0, arg1);
6576 else
6577 tem = sge_plus (mode, arg0, arg1);
6579 if (GET_CODE (tem) != CONST_INT)
6580 tem = gen_rtx_USE (mode, tem);
6581 return tem;
6583 case REG:
6584 case MULT:
6585 /* biv + invar or mult + invar. Return sum. */
6586 return gen_rtx_PLUS (mode, arg0, arg1);
6588 case PLUS:
6589 /* (a + invar_1) + invar_2. Associate. */
6590 return
6591 simplify_giv_expr (loop,
6592 gen_rtx_PLUS (mode,
6593 XEXP (arg0, 0),
6594 gen_rtx_PLUS (mode,
6595 XEXP (arg0, 1),
6596 arg1)),
6597 ext_val, benefit);
6599 default:
6600 abort ();
6603 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6604 MULT to reduce cases. */
6605 if (GET_CODE (arg0) == REG)
6606 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6607 if (GET_CODE (arg1) == REG)
6608 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6610 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6611 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6612 Recurse to associate the second PLUS. */
6613 if (GET_CODE (arg1) == MULT)
6614 tem = arg0, arg0 = arg1, arg1 = tem;
6616 if (GET_CODE (arg1) == PLUS)
6617 return
6618 simplify_giv_expr (loop,
6619 gen_rtx_PLUS (mode,
6620 gen_rtx_PLUS (mode, arg0,
6621 XEXP (arg1, 0)),
6622 XEXP (arg1, 1)),
6623 ext_val, benefit);
6625 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6626 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6627 return NULL_RTX;
6629 if (!rtx_equal_p (arg0, arg1))
6630 return NULL_RTX;
6632 return simplify_giv_expr (loop,
6633 gen_rtx_MULT (mode,
6634 XEXP (arg0, 0),
6635 gen_rtx_PLUS (mode,
6636 XEXP (arg0, 1),
6637 XEXP (arg1, 1))),
6638 ext_val, benefit);
6640 case MINUS:
6641 /* Handle "a - b" as "a + b * (-1)". */
6642 return simplify_giv_expr (loop,
6643 gen_rtx_PLUS (mode,
6644 XEXP (x, 0),
6645 gen_rtx_MULT (mode,
6646 XEXP (x, 1),
6647 constm1_rtx)),
6648 ext_val, benefit);
6650 case MULT:
6651 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6652 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6653 if (arg0 == 0 || arg1 == 0)
6654 return NULL_RTX;
6656 /* Put constant last, CONST_INT last if both constant. */
6657 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6658 && GET_CODE (arg1) != CONST_INT)
6659 tem = arg0, arg0 = arg1, arg1 = tem;
6661 /* If second argument is not now constant, not giv. */
6662 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6663 return NULL_RTX;
6665 /* Handle multiply by 0 or 1. */
6666 if (arg1 == const0_rtx)
6667 return const0_rtx;
6669 else if (arg1 == const1_rtx)
6670 return arg0;
6672 switch (GET_CODE (arg0))
6674 case REG:
6675 /* biv * invar. Done. */
6676 return gen_rtx_MULT (mode, arg0, arg1);
6678 case CONST_INT:
6679 /* Product of two constants. */
6680 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6682 case USE:
6683 /* invar * invar is a giv, but attempt to simplify it somehow. */
6684 if (GET_CODE (arg1) != CONST_INT)
6685 return NULL_RTX;
6687 arg0 = XEXP (arg0, 0);
6688 if (GET_CODE (arg0) == MULT)
6690 /* (invar_0 * invar_1) * invar_2. Associate. */
6691 return simplify_giv_expr (loop,
6692 gen_rtx_MULT (mode,
6693 XEXP (arg0, 0),
6694 gen_rtx_MULT (mode,
6695 XEXP (arg0, 1),
6697 arg1)),
6698 ext_val, benefit);
6700 /* Propagate the MULT expressions to the innermost nodes. */
6701 else if (GET_CODE (arg0) == PLUS)
6703 /* (invar_0 + invar_1) * invar_2. Distribute. */
6704 return simplify_giv_expr (loop,
6705 gen_rtx_PLUS (mode,
6706 gen_rtx_MULT (mode,
6707 XEXP (arg0, 0),
6709 arg1),
6710 gen_rtx_MULT (mode,
6711 XEXP (arg0, 1),
6713 arg1)),
6714 ext_val, benefit);
6716 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6718 case MULT:
6719 /* (a * invar_1) * invar_2. Associate. */
6720 return simplify_giv_expr (loop,
6721 gen_rtx_MULT (mode,
6722 XEXP (arg0, 0),
6723 gen_rtx_MULT (mode,
6724 XEXP (arg0, 1),
6725 arg1)),
6726 ext_val, benefit);
6728 case PLUS:
6729 /* (a + invar_1) * invar_2. Distribute. */
6730 return simplify_giv_expr (loop,
6731 gen_rtx_PLUS (mode,
6732 gen_rtx_MULT (mode,
6733 XEXP (arg0, 0),
6734 arg1),
6735 gen_rtx_MULT (mode,
6736 XEXP (arg0, 1),
6737 arg1)),
6738 ext_val, benefit);
6740 default:
6741 abort ();
6744 case ASHIFT:
6745 /* Shift by constant is multiply by power of two. */
6746 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6747 return 0;
6749 return
6750 simplify_giv_expr (loop,
6751 gen_rtx_MULT (mode,
6752 XEXP (x, 0),
6753 GEN_INT ((HOST_WIDE_INT) 1
6754 << INTVAL (XEXP (x, 1)))),
6755 ext_val, benefit);
6757 case NEG:
6758 /* "-a" is "a * (-1)" */
6759 return simplify_giv_expr (loop,
6760 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6761 ext_val, benefit);
6763 case NOT:
6764 /* "~a" is "-a - 1". Silly, but easy. */
6765 return simplify_giv_expr (loop,
6766 gen_rtx_MINUS (mode,
6767 gen_rtx_NEG (mode, XEXP (x, 0)),
6768 const1_rtx),
6769 ext_val, benefit);
6771 case USE:
6772 /* Already in proper form for invariant. */
6773 return x;
6775 case SIGN_EXTEND:
6776 case ZERO_EXTEND:
6777 case TRUNCATE:
6778 /* Conditionally recognize extensions of simple IVs. After we've
6779 computed loop traversal counts and verified the range of the
6780 source IV, we'll reevaluate this as a GIV. */
6781 if (*ext_val == NULL_RTX)
6783 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6784 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6786 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6787 return arg0;
6790 goto do_default;
6792 case REG:
6793 /* If this is a new register, we can't deal with it. */
6794 if (REGNO (x) >= max_reg_before_loop)
6795 return 0;
6797 /* Check for biv or giv. */
6798 switch (REG_IV_TYPE (ivs, REGNO (x)))
6800 case BASIC_INDUCT:
6801 return x;
6802 case GENERAL_INDUCT:
6804 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6806 /* Form expression from giv and add benefit. Ensure this giv
6807 can derive another and subtract any needed adjustment if so. */
6809 /* Increasing the benefit here is risky. The only case in which it
6810 is arguably correct is if this is the only use of V. In other
6811 cases, this will artificially inflate the benefit of the current
6812 giv, and lead to suboptimal code. Thus, it is disabled, since
6813 potentially not reducing an only marginally beneficial giv is
6814 less harmful than reducing many givs that are not really
6815 beneficial. */
6817 rtx single_use = regs->array[REGNO (x)].single_usage;
6818 if (single_use && single_use != const0_rtx)
6819 *benefit += v->benefit;
6822 if (v->cant_derive)
6823 return 0;
6825 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6826 v->src_reg, v->mult_val),
6827 v->add_val);
6829 if (v->derive_adjustment)
6830 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6831 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6832 if (*ext_val)
6834 if (!v->ext_dependent)
6835 return arg0;
6837 else
6839 *ext_val = v->ext_dependent;
6840 return arg0;
6842 return 0;
6845 default:
6846 do_default:
6847 /* If it isn't an induction variable, and it is invariant, we
6848 may be able to simplify things further by looking through
6849 the bits we just moved outside the loop. */
6850 if (loop_invariant_p (loop, x) == 1)
6852 struct movable *m;
6853 struct loop_movables *movables = LOOP_MOVABLES (loop);
6855 for (m = movables->head; m; m = m->next)
6856 if (rtx_equal_p (x, m->set_dest))
6858 /* Ok, we found a match. Substitute and simplify. */
6860 /* If we match another movable, we must use that, as
6861 this one is going away. */
6862 if (m->match)
6863 return simplify_giv_expr (loop, m->match->set_dest,
6864 ext_val, benefit);
6866 /* If consec is nonzero, this is a member of a group of
6867 instructions that were moved together. We handle this
6868 case only to the point of seeking to the last insn and
6869 looking for a REG_EQUAL. Fail if we don't find one. */
6870 if (m->consec != 0)
6872 int i = m->consec;
6873 tem = m->insn;
6876 tem = NEXT_INSN (tem);
6878 while (--i > 0);
6880 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6881 if (tem)
6882 tem = XEXP (tem, 0);
6884 else
6886 tem = single_set (m->insn);
6887 if (tem)
6888 tem = SET_SRC (tem);
6891 if (tem)
6893 /* What we are most interested in is pointer
6894 arithmetic on invariants -- only take
6895 patterns we may be able to do something with. */
6896 if (GET_CODE (tem) == PLUS
6897 || GET_CODE (tem) == MULT
6898 || GET_CODE (tem) == ASHIFT
6899 || GET_CODE (tem) == CONST_INT
6900 || GET_CODE (tem) == SYMBOL_REF)
6902 tem = simplify_giv_expr (loop, tem, ext_val,
6903 benefit);
6904 if (tem)
6905 return tem;
6907 else if (GET_CODE (tem) == CONST
6908 && GET_CODE (XEXP (tem, 0)) == PLUS
6909 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6910 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6912 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6913 ext_val, benefit);
6914 if (tem)
6915 return tem;
6918 break;
6921 break;
6924 /* Fall through to general case. */
6925 default:
6926 /* If invariant, return as USE (unless CONST_INT).
6927 Otherwise, not giv. */
6928 if (GET_CODE (x) == USE)
6929 x = XEXP (x, 0);
6931 if (loop_invariant_p (loop, x) == 1)
6933 if (GET_CODE (x) == CONST_INT)
6934 return x;
6935 if (GET_CODE (x) == CONST
6936 && GET_CODE (XEXP (x, 0)) == PLUS
6937 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6939 x = XEXP (x, 0);
6940 return gen_rtx_USE (mode, x);
6942 else
6943 return 0;
6947 /* This routine folds invariants such that there is only ever one
6948 CONST_INT in the summation. It is only used by simplify_giv_expr. */
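/* For illustration (hypothetical operands): folding (const_int 3) into

       (plus (symbol_ref x) (const_int 4))

   yields (plus (symbol_ref x) (const_int 7)), so the sum never carries
   more than one CONST_INT.  */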
6950 static rtx
6951 sge_plus_constant (x, c)
6952 rtx x, c;
6954 if (GET_CODE (x) == CONST_INT)
6955 return GEN_INT (INTVAL (x) + INTVAL (c));
6956 else if (GET_CODE (x) != PLUS)
6957 return gen_rtx_PLUS (GET_MODE (x), x, c);
6958 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6960 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6961 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6963 else if (GET_CODE (XEXP (x, 0)) == PLUS
6964 || GET_CODE (XEXP (x, 1)) != PLUS)
6966 return gen_rtx_PLUS (GET_MODE (x),
6967 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6969 else
6971 return gen_rtx_PLUS (GET_MODE (x),
6972 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6976 static rtx
6977 sge_plus (mode, x, y)
6978 enum machine_mode mode;
6979 rtx x, y;
6981 while (GET_CODE (y) == PLUS)
6983 rtx a = XEXP (y, 0);
6984 if (GET_CODE (a) == CONST_INT)
6985 x = sge_plus_constant (x, a);
6986 else
6987 x = gen_rtx_PLUS (mode, x, a);
6988 y = XEXP (y, 1);
6990 if (GET_CODE (y) == CONST_INT)
6991 x = sge_plus_constant (x, y);
6992 else
6993 x = gen_rtx_PLUS (mode, x, y);
6994 return x;
6997 /* Help detect a giv that is calculated by several consecutive insns;
6998 for example,
6999 giv = biv * M
7000 giv = giv + A
7001 The caller has already identified the first insn P as having a giv as dest;
7002 we check that all other insns that set the same register follow
7003 immediately after P, that they alter nothing else,
7004 and that the result of the last is still a giv.
7006 The value is 0 if the reg set in P is not really a giv.
7007 Otherwise, the value is the amount gained by eliminating
7008 all the consecutive insns that compute the value.
7010 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
7011 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
7013 The coefficients of the ultimate giv value are stored in
7014 *MULT_VAL and *ADD_VAL. */
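/* A sketch of the multi-insn pattern being recognized (hypothetical
   source-level equivalent, not from the original sources):

       t = i * 4;     first insn, already identified as a giv by the caller
       t = t + 8;     consecutive insn setting the same register

   The combined result t = i * 4 + 8 is still a giv, and the benefit of
   eliminating all the consecutive insns is returned.  */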
7016 static int
7017 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
7018 add_val, mult_val, ext_val, last_consec_insn)
7019 const struct loop *loop;
7020 int first_benefit;
7021 rtx p;
7022 rtx src_reg;
7023 rtx dest_reg;
7024 rtx *add_val;
7025 rtx *mult_val;
7026 rtx *ext_val;
7027 rtx *last_consec_insn;
7029 struct loop_ivs *ivs = LOOP_IVS (loop);
7030 struct loop_regs *regs = LOOP_REGS (loop);
7031 int count;
7032 enum rtx_code code;
7033 int benefit;
7034 rtx temp;
7035 rtx set;
7037 /* Indicate that this is a giv so that we can update the value produced in
7038 each insn of the multi-insn sequence.
7040 This induction structure will be used only by the call to
7041 general_induction_var below, so we can allocate it on our stack.
7042 If this is a giv, our caller will replace the induct var entry with
7043 a new induction structure. */
7044 struct induction *v;
7046 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7047 return 0;
7049 v = (struct induction *) alloca (sizeof (struct induction));
7050 v->src_reg = src_reg;
7051 v->mult_val = *mult_val;
7052 v->add_val = *add_val;
7053 v->benefit = first_benefit;
7054 v->cant_derive = 0;
7055 v->derive_adjustment = 0;
7056 v->ext_dependent = NULL_RTX;
7058 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7059 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7061 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7063 while (count > 0)
7065 p = NEXT_INSN (p);
7066 code = GET_CODE (p);
7068 /* If libcall, skip to end of call sequence. */
7069 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7070 p = XEXP (temp, 0);
7072 if (code == INSN
7073 && (set = single_set (p))
7074 && GET_CODE (SET_DEST (set)) == REG
7075 && SET_DEST (set) == dest_reg
7076 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7077 add_val, mult_val, ext_val, 0,
7078 &benefit, VOIDmode)
7079 /* Giv created by equivalent expression. */
7080 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7081 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7082 add_val, mult_val, ext_val, 0,
7083 &benefit, VOIDmode)))
7084 && src_reg == v->src_reg)
7086 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7087 benefit += libcall_benefit (p);
7089 count--;
7090 v->mult_val = *mult_val;
7091 v->add_val = *add_val;
7092 v->benefit += benefit;
7094 else if (code != NOTE)
7096 /* Allow insns that set something other than this giv to a
7097 constant. Such insns are needed on machines which cannot
7098 include long constants and should not disqualify a giv. */
7099 if (code == INSN
7100 && (set = single_set (p))
7101 && SET_DEST (set) != dest_reg
7102 && CONSTANT_P (SET_SRC (set)))
7103 continue;
7105 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7106 return 0;
7110 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7111 *last_consec_insn = p;
7112 return v->benefit;
7115 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7116 represented by G1. If no such expression can be found, or it is clear that
7117 it cannot possibly be a valid address, 0 is returned.
7119 To perform the computation, we note that
7120 G1 = x * v + a and
7121 G2 = y * v + b
7122 where `v' is the biv.
7124 So G2 = (y/x) * G1 + (b - a*y/x).
7126 Note that MULT = y/x.
7128 Update: A and B are now allowed to be additive expressions such that
7129 B contains all variables in A. That is, computing B-A will not require
7130 subtracting variables. */
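/* A numeric sketch of the identity above (hypothetical values): with
   G1 = 2*v + 3 and G2 = 6*v + 13 we get MULT = 6/2 = 3 and

       G2 = 3 * G1 + (13 - 3*3) = 3 * G1 + 4,

   so G2 can be rebuilt from the register holding G1 with one multiply
   and one add.  */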
7132 static rtx
7133 express_from_1 (a, b, mult)
7134 rtx a, b, mult;
7136 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7138 if (mult == const0_rtx)
7139 return b;
7141 /* If MULT is not 1, we cannot handle A with non-constants, since we
7142 would then be required to subtract multiples of the registers in A.
7143 This is theoretically possible, and may even apply to some Fortran
7144 constructs, but it is a lot of work and we do not attempt it here. */
7146 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7147 return NULL_RTX;
7149 /* In general these structures are sorted top to bottom (down the PLUS
7150 chain), but not left to right across the PLUS. If B is a higher
7151 order giv than A, we can strip one level and recurse. If A is higher
7152 order, we'll eventually bail out, but won't know that until the end.
7153 If they are the same, we'll strip one level around this loop. */
7155 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7157 rtx ra, rb, oa, ob, tmp;
7159 ra = XEXP (a, 0), oa = XEXP (a, 1);
7160 if (GET_CODE (ra) == PLUS)
7161 tmp = ra, ra = oa, oa = tmp;
7163 rb = XEXP (b, 0), ob = XEXP (b, 1);
7164 if (GET_CODE (rb) == PLUS)
7165 tmp = rb, rb = ob, ob = tmp;
7167 if (rtx_equal_p (ra, rb))
7168 /* We matched: remove one reg completely. */
7169 a = oa, b = ob;
7170 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7171 /* An alternate match. */
7172 a = oa, b = rb;
7173 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7174 /* An alternate match. */
7175 a = ra, b = ob;
7176 else
7178 /* Indicates an extra register in B. Strip one level from B and
7179 recurse, hoping B was the higher order expression. */
7180 ob = express_from_1 (a, ob, mult);
7181 if (ob == NULL_RTX)
7182 return NULL_RTX;
7183 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7187 /* Here we are at the last level of A, go through the cases hoping to
7188 get rid of everything but a constant. */
7190 if (GET_CODE (a) == PLUS)
7192 rtx ra, oa;
7194 ra = XEXP (a, 0), oa = XEXP (a, 1);
7195 if (rtx_equal_p (oa, b))
7196 oa = ra;
7197 else if (!rtx_equal_p (ra, b))
7198 return NULL_RTX;
7200 if (GET_CODE (oa) != CONST_INT)
7201 return NULL_RTX;
7203 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7205 else if (GET_CODE (a) == CONST_INT)
7207 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7209 else if (CONSTANT_P (a))
7211 enum machine_mode mode_a = GET_MODE (a);
7212 enum machine_mode mode_b = GET_MODE (b);
7213 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7214 return simplify_gen_binary (MINUS, mode, b, a);
7216 else if (GET_CODE (b) == PLUS)
7218 if (rtx_equal_p (a, XEXP (b, 0)))
7219 return XEXP (b, 1);
7220 else if (rtx_equal_p (a, XEXP (b, 1)))
7221 return XEXP (b, 0);
7222 else
7223 return NULL_RTX;
7225 else if (rtx_equal_p (a, b))
7226 return const0_rtx;
7228 return NULL_RTX;
7231 rtx
7232 express_from (g1, g2)
7233 struct induction *g1, *g2;
7235 rtx mult, add;
7237 /* The value that G1 will be multiplied by must be a constant integer. Also,
7238 the only chance we have of getting a valid address is if y/x (see above
7239 for notation) is also an integer. */
7240 if (GET_CODE (g1->mult_val) == CONST_INT
7241 && GET_CODE (g2->mult_val) == CONST_INT)
7243 if (g1->mult_val == const0_rtx
7244 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7245 return NULL_RTX;
7246 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7248 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7249 mult = const1_rtx;
7250 else
7252 /* ??? Find out if the one is a multiple of the other? */
7253 return NULL_RTX;
7256 add = express_from_1 (g1->add_val, g2->add_val, mult);
7257 if (add == NULL_RTX)
7259 /* Failed. If we've got a multiplication factor between G1 and G2,
7260 scale G1's addend and try again. */
7261 if (INTVAL (mult) > 1)
7263 rtx g1_add_val = g1->add_val;
7264 if (GET_CODE (g1_add_val) == MULT
7265 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7267 HOST_WIDE_INT m;
7268 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7269 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7270 XEXP (g1_add_val, 0), GEN_INT (m));
7272 else
7274 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7275 mult);
7278 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7281 if (add == NULL_RTX)
7282 return NULL_RTX;
7284 /* Form simplified final result. */
7285 if (mult == const0_rtx)
7286 return add;
7287 else if (mult == const1_rtx)
7288 mult = g1->dest_reg;
7289 else
7290 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7292 if (add == const0_rtx)
7293 return mult;
7294 else
7296 if (GET_CODE (add) == PLUS
7297 && CONSTANT_P (XEXP (add, 1)))
7299 rtx tem = XEXP (add, 1);
7300 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7301 add = tem;
7304 return gen_rtx_PLUS (g2->mode, mult, add);
7308 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7309 represented by G1. This indicates that G2 should be combined with G1 and
7310 that G2 can use (either directly or via an address expression) a register
7311 used to represent G1. */
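/* Hypothetical example: if G1 is a DEST_REG giv holding `base + 4*i' and
   G2 is a DEST_ADDR giv whose address is `base + 4*i + 8', express_from
   returns (plus (reg G1) (const_int 8)); when that is a valid, cheap
   address, G2 is simply addressed off G1's register.  */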
7313 static rtx
7314 combine_givs_p (g1, g2)
7315 struct induction *g1, *g2;
7317 rtx comb, ret;
7319 /* With the introduction of ext dependent givs, we must be careful about modes.
7320 G2 must not use a wider mode than G1. */
7321 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7322 return NULL_RTX;
7324 ret = comb = express_from (g1, g2);
7325 if (comb == NULL_RTX)
7326 return NULL_RTX;
7327 if (g1->mode != g2->mode)
7328 ret = gen_lowpart (g2->mode, comb);
7330 /* If these givs are identical, they can be combined. We use the results
7331 of express_from because the addends are not in a canonical form, so
7332 rtx_equal_p is a weaker test. */
7333 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7334 combination to be the other way round. */
7335 if (comb == g1->dest_reg
7336 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7338 return ret;
7341 /* If G2 can be expressed as a function of G1 and that function is valid
7342 as an address and no more expensive than using a register for G2,
7343 the expression of G2 in terms of G1 can be used. */
7344 if (ret != NULL_RTX
7345 && g2->giv_type == DEST_ADDR
7346 && memory_address_p (GET_MODE (g2->mem), ret))
7347 return ret;
7349 return NULL_RTX;
7352 /* Check each extension dependent giv in this class to see if its
7353 root biv is safe from wrapping in the interior mode, which would
7354 make the giv illegal. */
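/* A rough example of the wrap check below (hypothetical values): for a
   QImode biv starting at 0 with increment 1 and 100 known iterations, the
   final value 100 fits both the signed and unsigned ranges, so SIGN_EXTEND
   and ZERO_EXTEND givs are safe; with 200 iterations the signed range
   (-128..127) is exceeded, so only ZERO_EXTEND remains valid.  */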
7356 static void
7357 check_ext_dependent_givs (bl, loop_info)
7358 struct iv_class *bl;
7359 struct loop_info *loop_info;
7361 int ze_ok = 0, se_ok = 0, info_ok = 0;
7362 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7363 HOST_WIDE_INT start_val;
7364 unsigned HOST_WIDE_INT u_end_val = 0;
7365 unsigned HOST_WIDE_INT u_start_val = 0;
7366 rtx incr = pc_rtx;
7367 struct induction *v;
7369 /* Make sure the iteration data is available. We must have
7370 constants in order to be certain of no overflow. */
7371 /* ??? An unknown iteration count with an increment of +-1
7372 combined with friendly exit tests against an invariant
7373 value is also amenable to optimization. Not implemented. */
7374 if (loop_info->n_iterations > 0
7375 && bl->initial_value
7376 && GET_CODE (bl->initial_value) == CONST_INT
7377 && (incr = biv_total_increment (bl))
7378 && GET_CODE (incr) == CONST_INT
7379 /* Make sure the host can represent the arithmetic. */
7380 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7382 unsigned HOST_WIDE_INT abs_incr, total_incr;
7383 HOST_WIDE_INT s_end_val;
7384 int neg_incr;
7386 info_ok = 1;
7387 start_val = INTVAL (bl->initial_value);
7388 u_start_val = start_val;
7390 neg_incr = 0, abs_incr = INTVAL (incr);
7391 if (INTVAL (incr) < 0)
7392 neg_incr = 1, abs_incr = -abs_incr;
7393 total_incr = abs_incr * loop_info->n_iterations;
7395 /* Check for host arithmetic overflow. */
7396 if (total_incr / loop_info->n_iterations == abs_incr)
7398 unsigned HOST_WIDE_INT u_max;
7399 HOST_WIDE_INT s_max;
7401 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7402 s_end_val = u_end_val;
7403 u_max = GET_MODE_MASK (biv_mode);
7404 s_max = u_max >> 1;
7406 /* Check zero extension of biv ok. */
7407 if (start_val >= 0
7408 /* Check for host arithmetic overflow. */
7409 && (neg_incr
7410 ? u_end_val < u_start_val
7411 : u_end_val > u_start_val)
7412 /* Check for target arithmetic overflow. */
7413 && (neg_incr
7414 ? 1 /* taken care of with host overflow */
7415 : u_end_val <= u_max))
7417 ze_ok = 1;
7420 /* Check sign extension of biv ok. */
7421 /* ??? While it is true that overflow with signed and pointer
7422 arithmetic is undefined, I fear too many programmers don't
7423 keep this fact in mind -- myself included on occasion.
7424 So we do not take advantage of signed overflow being undefined here. */
7425 if (start_val >= -s_max - 1
7426 /* Check for host arithmetic overflow. */
7427 && (neg_incr
7428 ? s_end_val < start_val
7429 : s_end_val > start_val)
7430 /* Check for target arithmetic overflow. */
7431 && (neg_incr
7432 ? s_end_val >= -s_max - 1
7433 : s_end_val <= s_max))
7435 se_ok = 1;
7440 /* Invalidate givs that fail the tests. */
7441 for (v = bl->giv; v; v = v->next_iv)
7442 if (v->ext_dependent)
7444 enum rtx_code code = GET_CODE (v->ext_dependent);
7445 int ok = 0;
7447 switch (code)
7449 case SIGN_EXTEND:
7450 ok = se_ok;
7451 break;
7452 case ZERO_EXTEND:
7453 ok = ze_ok;
7454 break;
7456 case TRUNCATE:
7457 /* We don't know whether this value is being used as either
7458 signed or unsigned, so to safely truncate we must satisfy
7459 both. The initial check here verifies the BIV itself;
7460 once that is successful we may check its range wrt the
7461 derived GIV. */
7462 if (se_ok && ze_ok)
7464 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7465 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7467 /* We know from the above that both endpoints are nonnegative,
7468 and that there is no wrapping. Verify that both endpoints
7469 are within the (signed) range of the outer mode. */
7470 if (u_start_val <= max && u_end_val <= max)
7471 ok = 1;
7473 break;
7475 default:
7476 abort ();
7479 if (ok)
7481 if (loop_dump_stream)
7483 fprintf (loop_dump_stream,
7484 "Verified ext dependent giv at %d of reg %d\n",
7485 INSN_UID (v->insn), bl->regno);
7488 else
7490 if (loop_dump_stream)
7492 const char *why;
7494 if (info_ok)
7495 why = "biv iteration values overflowed";
7496 else
7498 if (incr == pc_rtx)
7499 incr = biv_total_increment (bl);
7500 if (incr == const1_rtx)
7501 why = "biv iteration info incomplete; incr by 1";
7502 else
7503 why = "biv iteration info incomplete";
7506 fprintf (loop_dump_stream,
7507 "Failed ext dependent giv at %d, %s\n",
7508 INSN_UID (v->insn), why);
7510 v->ignore = 1;
7511 bl->all_reduced = 0;
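/* A numeric example of the overflow tests above (illustrative only):
   for an 8-bit biv with initial value 250, increment +2 and 5 known
   iterations, the end value 260 exceeds the unsigned maximum 255, so
   ze_ok stays 0 and all ZERO_EXTEND dependent givs of the biv are
   invalidated; with only 2 iterations the end value 254 fits, no
   wrap-around can occur, and such givs are kept.  */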
7516 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7518 rtx
7519 extend_value_for_giv (v, value)
7520 struct induction *v;
7521 rtx value;
7523 rtx ext_dep = v->ext_dependent;
7525 if (! ext_dep)
7526 return value;
7528 /* Recall that check_ext_dependent_givs verified that the known bounds
7529 of a biv did not overflow or wrap with respect to the extension for
7530 the giv. Therefore, constants need no additional adjustment. */
7531 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7532 return value;
7534 /* Otherwise, we must adjust the value to compensate for the
7535 differing modes of the biv and the giv. */
7536 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
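/* Illustration (modes hypothetical): for a giv whose ext_dependent field
   is (sign_extend:DI (reg:SI biv)), a non-constant VALUE comes back as
   (sign_extend:DI VALUE), i.e. the same extension code and outer mode
   wrapped around VALUE; VOIDmode constants are returned unchanged since
   check_ext_dependent_givs has already shown they cannot wrap.  */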
7539 struct combine_givs_stats
7541 int giv_number;
7542 int total_benefit;
7545 static int
7546 cmp_combine_givs_stats (xp, yp)
7547 const PTR xp;
7548 const PTR yp;
7550 const struct combine_givs_stats * const x =
7551 (const struct combine_givs_stats *) xp;
7552 const struct combine_givs_stats * const y =
7553 (const struct combine_givs_stats *) yp;
7554 int d;
7555 d = y->total_benefit - x->total_benefit;
7556 /* Stabilize the sort. */
7557 if (!d)
7558 d = x->giv_number - y->giv_number;
7559 return d;
7562 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7563 any other. If so, point SAME to the giv combined with and set NEW_REG to
7564 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7565 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7567 static void
7568 combine_givs (regs, bl)
7569 struct loop_regs *regs;
7570 struct iv_class *bl;
7572 /* Additional benefit to add for being combined multiple times. */
7573 const int extra_benefit = 3;
7575 struct induction *g1, *g2, **giv_array;
7576 int i, j, k, giv_count;
7577 struct combine_givs_stats *stats;
7578 rtx *can_combine;
7580 /* Count givs, because bl->giv_count is incorrect here. */
7581 giv_count = 0;
7582 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7583 if (!g1->ignore)
7584 giv_count++;
7586 giv_array
7587 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7588 i = 0;
7589 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7590 if (!g1->ignore)
7591 giv_array[i++] = g1;
7593 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7594 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7596 for (i = 0; i < giv_count; i++)
7598 int this_benefit;
7599 rtx single_use;
7601 g1 = giv_array[i];
7602 stats[i].giv_number = i;
7604 /* If a DEST_REG GIV is used only once, do not allow it to combine
7605 with anything, for in doing so we will gain nothing that cannot
7606 be had by simply letting the GIV with which we would have combined
7607 be reduced on its own. The loss shows up in particular with
7608 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7609 be seen elsewhere as well. */
7610 if (g1->giv_type == DEST_REG
7611 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7612 && single_use != const0_rtx)
7613 continue;
7615 this_benefit = g1->benefit;
7616 /* Add an additional weight for zero addends. */
7617 if (g1->no_const_addval)
7618 this_benefit += 1;
7620 for (j = 0; j < giv_count; j++)
7622 rtx this_combine;
7624 g2 = giv_array[j];
7625 if (g1 != g2
7626 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7628 can_combine[i * giv_count + j] = this_combine;
7629 this_benefit += g2->benefit + extra_benefit;
7632 stats[i].total_benefit = this_benefit;
7635 /* Iterate, combining until we can't. */
7636 restart:
7637 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7639 if (loop_dump_stream)
7641 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7642 for (k = 0; k < giv_count; k++)
7644 g1 = giv_array[stats[k].giv_number];
7645 if (!g1->combined_with && !g1->same)
7646 fprintf (loop_dump_stream, " {%d, %d}",
7647 INSN_UID (giv_array[stats[k].giv_number]->insn),
7648 stats[k].total_benefit);
7650 putc ('\n', loop_dump_stream);
7653 for (k = 0; k < giv_count; k++)
7655 int g1_add_benefit = 0;
7657 i = stats[k].giv_number;
7658 g1 = giv_array[i];
7660 /* If it has already been combined, skip. */
7661 if (g1->combined_with || g1->same)
7662 continue;
7664 for (j = 0; j < giv_count; j++)
7666 g2 = giv_array[j];
7667 if (g1 != g2 && can_combine[i * giv_count + j]
7668 /* If it has already been combined, skip. */
7669 && ! g2->same && ! g2->combined_with)
7671 int l;
7673 g2->new_reg = can_combine[i * giv_count + j];
7674 g2->same = g1;
7675 /* For a DEST_ADDR giv, the destination may now be replaced by a mem
7676 expression instead of a register. This changes the costs considerably,
7677 so add the compensation. */
7678 if (g2->giv_type == DEST_ADDR)
7679 g2->benefit = (g2->benefit + reg_address_cost
7680 - address_cost (g2->new_reg,
7681 GET_MODE (g2->mem)));
7682 g1->combined_with++;
7683 g1->lifetime += g2->lifetime;
7685 g1_add_benefit += g2->benefit;
7687 /* ??? The new final_[bg]iv_value code does a much better job
7688 of finding replaceable giv's, and hence this code may no
7689 longer be necessary. */
7690 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7691 g1_add_benefit -= copy_cost;
7693 /* To help optimize the next set of combinations, remove
7694 this giv from the benefits of other potential mates. */
7695 for (l = 0; l < giv_count; ++l)
7697 int m = stats[l].giv_number;
7698 if (can_combine[m * giv_count + j])
7699 stats[l].total_benefit -= g2->benefit + extra_benefit;
7702 if (loop_dump_stream)
7703 fprintf (loop_dump_stream,
7704 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7705 INSN_UID (g2->insn), INSN_UID (g1->insn),
7706 g1->benefit, g1_add_benefit, g1->lifetime);
7710 /* To help optimize the next set of combinations, remove
7711 this giv from the benefits of other potential mates. */
7712 if (g1->combined_with)
7714 for (j = 0; j < giv_count; ++j)
7716 int m = stats[j].giv_number;
7717 if (can_combine[m * giv_count + i])
7718 stats[j].total_benefit -= g1->benefit + extra_benefit;
7721 g1->benefit += g1_add_benefit;
7723 /* We've finished with this giv, and everything it touched.
7724 Restart the combination so that proper weights for the
7725 rest of the givs are properly taken into account. */
7726 /* ??? Ideally we would compact the arrays at this point, so
7727 as to not cover old ground. But sanely compacting
7728 can_combine is tricky. */
7729 goto restart;
7733 /* Clean up. */
7734 free (stats);
7735 free (can_combine);
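/* Rough sketch of the bookkeeping above (numbers illustrative): if giv A
   with benefit 10 can absorb givs B (benefit 6) and C (benefit 4), A is
   initially credited 10 + (6 + 3) + (4 + 3) = 26 and sorts first.  Once
   B and C are actually combined into A, their benefits (adjusted for
   DEST_ADDR address costs and non-replaceable user variables) are folded
   into A->benefit, removed from every other candidate's total, and the
   sort is restarted.  */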
7738 /* Generate sequence for REG = B * M + A. */
7740 static rtx
7741 gen_add_mult (b, m, a, reg)
7742 rtx b; /* initial value of basic induction variable */
7743 rtx m; /* multiplicative constant */
7744 rtx a; /* additive constant */
7745 rtx reg; /* destination register */
7747 rtx seq;
7748 rtx result;
7750 start_sequence ();
7751 /* Use unsigned arithmetic. */
7752 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7753 if (reg != result)
7754 emit_move_insn (reg, result);
7755 seq = get_insns ();
7756 end_sequence ();
7758 return seq;
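/* For example, a caller wanting REG = BIV * 4 + INVARIANT can pass
   b = the biv's reg, m = GEN_INT (4) and a = the invariant rtx; the
   returned sequence holds whatever shift/add or multiply insns
   expand_mult_add chose for the target, followed by a move into REG
   when the expansion did not already compute the result there.  */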
7762 /* Update registers created in insn sequence SEQ. */
7764 static void
7765 loop_regs_update (loop, seq)
7766 const struct loop *loop ATTRIBUTE_UNUSED;
7767 rtx seq;
7769 rtx insn;
7771 /* Update register info for alias analysis. */
7773 if (seq == NULL_RTX)
7774 return;
7776 if (INSN_P (seq))
7778 insn = seq;
7779 while (insn != NULL_RTX)
7781 rtx set = single_set (insn);
7783 if (set && GET_CODE (SET_DEST (set)) == REG)
7784 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7786 insn = NEXT_INSN (insn);
7789 else if (GET_CODE (seq) == SET
7790 && GET_CODE (SET_DEST (seq)) == REG)
7791 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7795 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7797 void
7798 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7799 const struct loop *loop;
7800 rtx b; /* initial value of basic induction variable */
7801 rtx m; /* multiplicative constant */
7802 rtx a; /* additive constant */
7803 rtx reg; /* destination register */
7804 basic_block before_bb;
7805 rtx before_insn;
7807 rtx seq;
7809 if (! before_insn)
7811 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7812 return;
7815 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7816 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7818 /* Increase the lifetime of any invariants moved further in code. */
7819 update_reg_last_use (a, before_insn);
7820 update_reg_last_use (b, before_insn);
7821 update_reg_last_use (m, before_insn);
7823 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7825 /* It is possible that the expansion created lots of new registers.
7826 Iterate over the sequence we just created and record them all. */
7827 loop_regs_update (loop, seq);
7831 /* Emit insns after the end of the loop to set REG = B * M + A. */
7833 void
7834 loop_iv_add_mult_sink (loop, b, m, a, reg)
7835 const struct loop *loop;
7836 rtx b; /* initial value of basic induction variable */
7837 rtx m; /* multiplicative constant */
7838 rtx a; /* additive constant */
7839 rtx reg; /* destination register */
7841 rtx seq;
7843 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7844 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7846 /* Increase the lifetime of any invariants moved further in code.
7847 ???? Is this really necessary? */
7848 update_reg_last_use (a, loop->sink);
7849 update_reg_last_use (b, loop->sink);
7850 update_reg_last_use (m, loop->sink);
7852 loop_insn_sink (loop, seq);
7854 /* It is possible that the expansion created lots of new registers.
7855 Iterate over the sequence we just created and record them all. */
7856 loop_regs_update (loop, seq);
7860 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7862 void
7863 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7864 const struct loop *loop;
7865 rtx b; /* initial value of basic induction variable */
7866 rtx m; /* multiplicative constant */
7867 rtx a; /* additive constant */
7868 rtx reg; /* destination register */
7870 rtx seq;
7872 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7873 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7875 loop_insn_hoist (loop, seq);
7877 /* It is possible that the expansion created lots of new registers.
7878 Iterate over the sequence we just created and record them all. */
7879 loop_regs_update (loop, seq);
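/* Usage note for the three variants above: loop_iv_add_mult_emit_before
   places the computation at an explicit insn, loop_iv_add_mult_sink
   places it at the loop's sink point so the value is available after the
   loop exits, and loop_iv_add_mult_hoist places it in the pre-header so
   it is computed once before the loop is entered.  */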
7884 /* Similar to gen_add_mult, but compute cost rather than generating
7885 sequence. */
7887 static int
7888 iv_add_mult_cost (b, m, a, reg)
7889 rtx b; /* initial value of basic induction variable */
7890 rtx m; /* multiplicative constant */
7891 rtx a; /* additive constant */
7892 rtx reg; /* destination register */
7894 int cost = 0;
7895 rtx last, result;
7897 start_sequence ();
7898 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7899 if (reg != result)
7900 emit_move_insn (reg, result);
7901 last = get_last_insn ();
7902 while (last)
7904 rtx t = single_set (last);
7905 if (t)
7906 cost += rtx_cost (SET_SRC (t), SET);
7907 last = PREV_INSN (last);
7909 end_sequence ();
7910 return cost;
7913 /* Test whether A * B can be computed without
7914 an actual multiply insn. Value is 1 if so.
7916 ??? This function stinks because it generates a ton of wasted RTL
7917 ??? and as a result fragments GC memory to no end. There are other
7918 ??? places in the compiler which are invoked a lot and do the same
7919 ??? thing, generate wasted RTL just to see if something is possible. */
7921 static int
7922 product_cheap_p (a, b)
7923 rtx a;
7924 rtx b;
7926 rtx tmp;
7927 int win, n_insns;
7929 /* If only one is constant, make it B. */
7930 if (GET_CODE (a) == CONST_INT)
7931 tmp = a, a = b, b = tmp;
7933 /* If first constant, both constant, so don't need multiply. */
7934 if (GET_CODE (a) == CONST_INT)
7935 return 1;
7937 /* If second not constant, neither is constant, so would need multiply. */
7938 if (GET_CODE (b) != CONST_INT)
7939 return 0;
7941 /* One operand is constant, so might not need multiply insn. Generate the
7942 code for the multiply and see if a call or multiply, or long sequence
7943 of insns is generated. */
7945 start_sequence ();
7946 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7947 tmp = get_insns ();
7948 end_sequence ();
7950 win = 1;
7951 if (INSN_P (tmp))
7953 n_insns = 0;
7954 while (tmp != NULL_RTX)
7956 rtx next = NEXT_INSN (tmp);
7958 if (++n_insns > 3
7959 || GET_CODE (tmp) != INSN
7960 || (GET_CODE (PATTERN (tmp)) == SET
7961 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7962 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7963 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7964 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7966 win = 0;
7967 break;
7970 tmp = next;
7973 else if (GET_CODE (tmp) == SET
7974 && GET_CODE (SET_SRC (tmp)) == MULT)
7975 win = 0;
7976 else if (GET_CODE (tmp) == PARALLEL
7977 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7978 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7979 win = 0;
7981 return win;
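/* For instance, on a target where expand_mult turns X * 5 into a shift
   and an add, the generated sequence is two insns containing no MULT,
   so the product is considered cheap; a multiplier that expands to a
   real multiply insn, a libcall, or more than three insns is rejected.  */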
7984 /* Check to see if loop can be terminated by a "decrement and branch until
7985 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7986 Also try reversing an increment loop to a decrement loop
7987 to see if the optimization can be performed.
7988 Value is nonzero if optimization was performed. */
7990 /* This is useful even if the architecture doesn't have such an insn,
7991 because it might change a loop which increments from 0 to n to a loop
7992 which decrements from n to 0. A loop that decrements to zero is usually
7993 faster than one that increments from zero. */
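/* As a source-level sketch (illustrative only): a counting loop such as
   "for (i = 0; i < n; i++)" whose body does not otherwise use I can be
   rewritten as "for (i = n; i > 0; i--)", so that the exit test compares
   against zero and can be matched by a decrement-and-branch insn.  */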
7995 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7996 such as approx_final_value, biv_total_increment, loop_iterations, and
7997 final_[bg]iv_value. */
7999 static int
8000 check_dbra_loop (loop, insn_count)
8001 struct loop *loop;
8002 int insn_count;
8004 struct loop_info *loop_info = LOOP_INFO (loop);
8005 struct loop_regs *regs = LOOP_REGS (loop);
8006 struct loop_ivs *ivs = LOOP_IVS (loop);
8007 struct iv_class *bl;
8008 rtx reg;
8009 rtx jump_label;
8010 rtx final_value;
8011 rtx start_value;
8012 rtx new_add_val;
8013 rtx comparison;
8014 rtx before_comparison;
8015 rtx p;
8016 rtx jump;
8017 rtx first_compare;
8018 int compare_and_branch;
8019 rtx loop_start = loop->start;
8020 rtx loop_end = loop->end;
8022 /* If last insn is a conditional branch, and the insn before tests a
8023 register value, try to optimize it. Otherwise, we can't do anything. */
8025 jump = PREV_INSN (loop_end);
8026 comparison = get_condition_for_loop (loop, jump);
8027 if (comparison == 0)
8028 return 0;
8029 if (!onlyjump_p (jump))
8030 return 0;
8032 /* Try to compute whether the compare/branch at the loop end is one or
8033 two instructions. */
8034 get_condition (jump, &first_compare);
8035 if (first_compare == jump)
8036 compare_and_branch = 1;
8037 else if (first_compare == prev_nonnote_insn (jump))
8038 compare_and_branch = 2;
8039 else
8040 return 0;
8043 /* If more than one condition is present to control the loop, then
8044 do not proceed, as this function does not know how to rewrite
8045 loop tests with more than one condition.
8047 Look backwards from the first insn in the last comparison
8048 sequence and see if we've got another comparison sequence. */
8050 rtx jump1;
8051 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8052 if (GET_CODE (jump1) == JUMP_INSN)
8053 return 0;
8056 /* Check all of the bivs to see if the compare uses one of them.
8057 Skip biv's set more than once because we can't guarantee that
8058 it will be zero on the last iteration. Also skip if the biv is
8059 used between its update and the test insn. */
8061 for (bl = ivs->list; bl; bl = bl->next)
8063 if (bl->biv_count == 1
8064 && ! bl->biv->maybe_multiple
8065 && bl->biv->dest_reg == XEXP (comparison, 0)
8066 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8067 first_compare))
8068 break;
8071 if (! bl)
8072 return 0;
8074 /* Look for the case where the basic induction variable is always
8075 nonnegative, and equals zero on the last iteration.
8076 In this case, add a reg_note REG_NONNEG, which allows the
8077 m68k DBRA instruction to be used. */
8079 if (((GET_CODE (comparison) == GT
8080 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8081 && INTVAL (XEXP (comparison, 1)) == -1)
8082 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8083 && GET_CODE (bl->biv->add_val) == CONST_INT
8084 && INTVAL (bl->biv->add_val) < 0)
8086 /* The initial value must be greater than 0 and
8087 init_val % -dec_value == 0, to ensure that the biv equals zero on
8088 the last iteration. */
8090 if (GET_CODE (bl->initial_value) == CONST_INT
8091 && INTVAL (bl->initial_value) > 0
8092 && (INTVAL (bl->initial_value)
8093 % (-INTVAL (bl->biv->add_val))) == 0)
8095 /* register always nonnegative, add REG_NOTE to branch */
8096 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8097 REG_NOTES (jump)
8098 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8099 REG_NOTES (jump));
8100 bl->nonneg = 1;
8102 return 1;
8105 /* If the decrement is 1 and the value was tested as >= 0 before
8106 the loop, then we can safely optimize. */
8107 for (p = loop_start; p; p = PREV_INSN (p))
8109 if (GET_CODE (p) == CODE_LABEL)
8110 break;
8111 if (GET_CODE (p) != JUMP_INSN)
8112 continue;
8114 before_comparison = get_condition_for_loop (loop, p);
8115 if (before_comparison
8116 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8117 && GET_CODE (before_comparison) == LT
8118 && XEXP (before_comparison, 1) == const0_rtx
8119 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8120 && INTVAL (bl->biv->add_val) == -1)
8122 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8123 REG_NOTES (jump)
8124 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8125 REG_NOTES (jump));
8126 bl->nonneg = 1;
8128 return 1;
8132 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8133 && INTVAL (bl->biv->add_val) > 0)
8135 /* Try to change inc to dec, so can apply above optimization. */
8136 /* Can do this if:
8137 all registers modified are induction variables or invariant,
8138 all memory references have non-overlapping addresses
8139 (obviously true if only one write)
8140 allow 2 insns for the compare/jump at the end of the loop. */
8141 /* Also, we must avoid any instructions which use both the reversed
8142 biv and another biv. Such instructions will fail if the loop is
8143 reversed. We meet this condition by requiring that either
8144 no_use_except_counting is true, or else that there is only
8145 one biv. */
8146 int num_nonfixed_reads = 0;
8147 /* 1 if the iteration var is used only to count iterations. */
8148 int no_use_except_counting = 0;
8149 /* 1 if the loop has no memory store, or it has a single memory store
8150 which is reversible. */
8151 int reversible_mem_store = 1;
8153 if (bl->giv_count == 0
8154 && !loop->exit_count
8155 && !loop_info->has_multiple_exit_targets)
8157 rtx bivreg = regno_reg_rtx[bl->regno];
8158 struct iv_class *blt;
8160 /* If there are no givs for this biv, and the only exit is the
8161 fall through at the end of the loop, then
8162 see if perhaps there are no uses except to count. */
8163 no_use_except_counting = 1;
8164 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8165 if (INSN_P (p))
8167 rtx set = single_set (p);
8169 if (set && GET_CODE (SET_DEST (set)) == REG
8170 && REGNO (SET_DEST (set)) == bl->regno)
8171 /* An insn that sets the biv is okay. */
8173 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8174 || p == prev_nonnote_insn (loop_end))
8175 && reg_mentioned_p (bivreg, PATTERN (p)))
8177 /* If either of these insns uses the biv and sets a pseudo
8178 that has more than one usage, then the biv has uses
8179 other than counting since it's used to derive a value
8180 that is used more than one time. */
8181 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8182 regs);
8183 if (regs->multiple_uses)
8185 no_use_except_counting = 0;
8186 break;
8189 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8191 no_use_except_counting = 0;
8192 break;
8196 /* A biv has uses besides counting if it is used to set
8197 another biv. */
8198 for (blt = ivs->list; blt; blt = blt->next)
8199 if (blt->init_set
8200 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8202 no_use_except_counting = 0;
8203 break;
8207 if (no_use_except_counting)
8208 /* No need to worry about MEMs. */
8210 else if (loop_info->num_mem_sets <= 1)
8212 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8213 if (INSN_P (p))
8214 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8216 /* If the loop has a single store, and the destination address is
8217 invariant, then we can't reverse the loop, because this address
8218 might then have the wrong value at loop exit.
8219 This would work if the source was invariant also, however, in that
8220 case, the insn should have been moved out of the loop. */
8222 if (loop_info->num_mem_sets == 1)
8224 struct induction *v;
8226 /* If we could prove that each of the memory locations
8227 written to was different, then we could reverse the
8228 store -- but we don't presently have any way of
8229 knowing that. */
8230 reversible_mem_store = 0;
8232 /* If the store depends on a register that is set after the
8233 store, it depends on the initial value, and is thus not
8234 reversible. */
8235 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8237 if (v->giv_type == DEST_REG
8238 && reg_mentioned_p (v->dest_reg,
8239 PATTERN (loop_info->first_loop_store_insn))
8240 && loop_insn_first_p (loop_info->first_loop_store_insn,
8241 v->insn))
8242 reversible_mem_store = 0;
8246 else
8247 return 0;
8249 /* This code only acts for innermost loops. Also it simplifies
8250 the memory address check by only reversing loops with
8251 zero or one memory access.
8252 Two memory accesses could involve parts of the same array,
8253 and that can't be reversed.
8254 If the biv is used only for counting, then we don't need to worry
8255 about all these things. */
8257 if ((num_nonfixed_reads <= 1
8258 && ! loop_info->has_nonconst_call
8259 && ! loop_info->has_prefetch
8260 && ! loop_info->has_volatile
8261 && reversible_mem_store
8262 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8263 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8264 && (bl == ivs->list && bl->next == 0))
8265 || (no_use_except_counting && ! loop_info->has_prefetch))
8267 rtx tem;
8269 /* Loop can be reversed. */
8270 if (loop_dump_stream)
8271 fprintf (loop_dump_stream, "Can reverse loop\n");
8273 /* Now check other conditions:
8275 The increment must be a constant, as must the initial value,
8276 and the comparison code must be LT.
8278 This test can probably be improved since +/- 1 in the constant
8279 can be obtained by changing LT to LE and vice versa; this is
8280 confusing. */
8282 if (comparison
8283 /* for constants, LE gets turned into LT */
8284 && (GET_CODE (comparison) == LT
8285 || (GET_CODE (comparison) == LE
8286 && no_use_except_counting)))
8288 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8289 rtx initial_value, comparison_value;
8290 int nonneg = 0;
8291 enum rtx_code cmp_code;
8292 int comparison_const_width;
8293 unsigned HOST_WIDE_INT comparison_sign_mask;
8295 add_val = INTVAL (bl->biv->add_val);
8296 comparison_value = XEXP (comparison, 1);
8297 if (GET_MODE (comparison_value) == VOIDmode)
8298 comparison_const_width
8299 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8300 else
8301 comparison_const_width
8302 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8303 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8304 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8305 comparison_sign_mask
8306 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8308 /* If the comparison value is not a loop invariant, then we
8309 can not reverse this loop.
8311 ??? If the insns which initialize the comparison value as
8312 a whole compute an invariant result, then we could move
8313 them out of the loop and proceed with loop reversal. */
8314 if (! loop_invariant_p (loop, comparison_value))
8315 return 0;
8317 if (GET_CODE (comparison_value) == CONST_INT)
8318 comparison_val = INTVAL (comparison_value);
8319 initial_value = bl->initial_value;
8321 /* Normalize the initial value if it is an integer and
8322 has no other use except as a counter. This will allow
8323 a few more loops to be reversed. */
8324 if (no_use_except_counting
8325 && GET_CODE (comparison_value) == CONST_INT
8326 && GET_CODE (initial_value) == CONST_INT)
8328 comparison_val = comparison_val - INTVAL (bl->initial_value);
8329 /* The code below requires comparison_val to be a multiple
8330 of add_val in order to do the loop reversal, so
8331 round up comparison_val to a multiple of add_val.
8332 Since comparison_value is constant, we know that the
8333 current comparison code is LT. */
8334 comparison_val = comparison_val + add_val - 1;
8335 comparison_val
8336 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8337 /* We postpone overflow checks for COMPARISON_VAL here;
8338 even if there is an overflow, we might still be able to
8339 reverse the loop, if converting the loop exit test to
8340 NE is possible. */
8341 initial_value = const0_rtx;
8344 /* First check if we can do a vanilla loop reversal. */
8345 if (initial_value == const0_rtx
8346 /* If we have a decrement_and_branch_on_count,
8347 prefer the NE test, since this will allow that
8348 instruction to be generated. Note that we must
8349 use a vanilla loop reversal if the biv is used to
8350 calculate a giv or has a non-counting use. */
8351 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8352 && defined (HAVE_decrement_and_branch_on_count)
8353 && (! (add_val == 1 && loop->vtop
8354 && (bl->biv_count == 0
8355 || no_use_except_counting)))
8356 #endif
8357 && GET_CODE (comparison_value) == CONST_INT
8358 /* Now do postponed overflow checks on COMPARISON_VAL. */
8359 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8360 & comparison_sign_mask))
8362 /* Register will always be nonnegative, with value
8363 0 on last iteration */
8364 add_adjust = add_val;
8365 nonneg = 1;
8366 cmp_code = GE;
8368 else if (add_val == 1 && loop->vtop
8369 && (bl->biv_count == 0
8370 || no_use_except_counting))
8372 add_adjust = 0;
8373 cmp_code = NE;
8375 else
8376 return 0;
8378 if (GET_CODE (comparison) == LE)
8379 add_adjust -= add_val;
8381 /* If the initial value is not zero, or if the comparison
8382 value is not an exact multiple of the increment, then we
8383 can not reverse this loop. */
8384 if (initial_value == const0_rtx
8385 && GET_CODE (comparison_value) == CONST_INT)
8387 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8388 return 0;
8390 else
8392 if (! no_use_except_counting || add_val != 1)
8393 return 0;
8396 final_value = comparison_value;
8398 /* Reset these in case we normalized the initial value
8399 and comparison value above. */
8400 if (GET_CODE (comparison_value) == CONST_INT
8401 && GET_CODE (initial_value) == CONST_INT)
8403 comparison_value = GEN_INT (comparison_val);
8404 final_value
8405 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8407 bl->initial_value = initial_value;
8409 /* Save some info needed to produce the new insns. */
8410 reg = bl->biv->dest_reg;
8411 jump_label = condjump_label (PREV_INSN (loop_end));
8412 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8414 /* Set start_value; if this is not a CONST_INT, we need
8415 to generate a SUB.
8416 Initialize biv to start_value before loop start.
8417 The old initializing insn will be deleted as a
8418 dead store by flow.c. */
8419 if (initial_value == const0_rtx
8420 && GET_CODE (comparison_value) == CONST_INT)
8422 start_value = GEN_INT (comparison_val - add_adjust);
8423 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8425 else if (GET_CODE (initial_value) == CONST_INT)
8427 enum machine_mode mode = GET_MODE (reg);
8428 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8429 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8431 if (add_insn == 0)
8432 return 0;
8434 start_value
8435 = gen_rtx_PLUS (mode, comparison_value, offset);
8436 loop_insn_hoist (loop, add_insn);
8437 if (GET_CODE (comparison) == LE)
8438 final_value = gen_rtx_PLUS (mode, comparison_value,
8439 GEN_INT (add_val));
8441 else if (! add_adjust)
8443 enum machine_mode mode = GET_MODE (reg);
8444 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8445 initial_value);
8447 if (sub_insn == 0)
8448 return 0;
8449 start_value
8450 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8451 loop_insn_hoist (loop, sub_insn);
8453 else
8454 /* We could handle the other cases too, but it'll be
8455 better to have a testcase first. */
8456 return 0;
8458 /* We may not have a single insn which can increment a reg, so
8459 create a sequence to hold all the insns from expand_inc. */
8460 start_sequence ();
8461 expand_inc (reg, new_add_val);
8462 tem = get_insns ();
8463 end_sequence ();
8465 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8466 delete_insn (bl->biv->insn);
8468 /* Update biv info to reflect its new status. */
8469 bl->biv->insn = p;
8470 bl->initial_value = start_value;
8471 bl->biv->add_val = new_add_val;
8473 /* Update loop info. */
8474 loop_info->initial_value = reg;
8475 loop_info->initial_equiv_value = reg;
8476 loop_info->final_value = const0_rtx;
8477 loop_info->final_equiv_value = const0_rtx;
8478 loop_info->comparison_value = const0_rtx;
8479 loop_info->comparison_code = cmp_code;
8480 loop_info->increment = new_add_val;
8482 /* Inc LABEL_NUSES so that delete_insn will
8483 not delete the label. */
8484 LABEL_NUSES (XEXP (jump_label, 0))++;
8486 /* Emit an insn after the end of the loop to set the biv's
8487 proper exit value if it is used anywhere outside the loop. */
8488 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8489 || ! bl->init_insn
8490 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8491 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8493 /* Delete compare/branch at end of loop. */
8494 delete_related_insns (PREV_INSN (loop_end));
8495 if (compare_and_branch == 2)
8496 delete_related_insns (first_compare);
8498 /* Add new compare/branch insn at end of loop. */
8499 start_sequence ();
8500 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8501 GET_MODE (reg), 0,
8502 XEXP (jump_label, 0));
8503 tem = get_insns ();
8504 end_sequence ();
8505 emit_jump_insn_before (tem, loop_end);
8507 for (tem = PREV_INSN (loop_end);
8508 tem && GET_CODE (tem) != JUMP_INSN;
8509 tem = PREV_INSN (tem))
8512 if (tem)
8513 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8515 if (nonneg)
8517 if (tem)
8519 /* Increment of LABEL_NUSES done above. */
8520 /* Register is now always nonnegative,
8521 so add REG_NONNEG note to the branch. */
8522 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8523 REG_NOTES (tem));
8525 bl->nonneg = 1;
8528 /* No insn may reference both the reversed and another biv or it
8529 will fail (see comment near the top of the loop reversal
8530 code).
8531 Earlier on, we have verified that the biv has no use except
8532 counting, or it is the only biv in this function.
8533 However, the code that computes no_use_except_counting does
8534 not verify reg notes. It's possible to have an insn that
8535 references another biv, and has a REG_EQUAL note with an
8536 expression based on the reversed biv. To avoid this case,
8537 remove all REG_EQUAL notes based on the reversed biv
8538 here. */
8539 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8540 if (INSN_P (p))
8542 rtx *pnote;
8543 rtx set = single_set (p);
8544 /* If this is a set of a GIV based on the reversed biv, any
8545 REG_EQUAL notes should still be correct. */
8546 if (! set
8547 || GET_CODE (SET_DEST (set)) != REG
8548 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8549 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8550 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8551 for (pnote = &REG_NOTES (p); *pnote;)
8553 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8554 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8555 XEXP (*pnote, 0)))
8556 *pnote = XEXP (*pnote, 1);
8557 else
8558 pnote = &XEXP (*pnote, 1);
8562 /* Mark that this biv has been reversed. Each giv which depends
8563 on this biv, and which is also live past the end of the loop
8564 will have to be fixed up. */
8566 bl->reversed = 1;
8568 if (loop_dump_stream)
8570 fprintf (loop_dump_stream, "Reversed loop");
8571 if (bl->nonneg)
8572 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8573 else
8574 fprintf (loop_dump_stream, "\n");
8577 return 1;
8582 return 0;
8585 /* Verify whether the biv BL appears to be eliminable,
8586 based on the insns in the loop that refer to it.
8588 If ELIMINATE_P is nonzero, actually do the elimination.
8590 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8591 determine whether invariant insns should be placed inside or at the
8592 start of the loop. */
8594 static int
8595 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8596 const struct loop *loop;
8597 struct iv_class *bl;
8598 int eliminate_p;
8599 int threshold, insn_count;
8601 struct loop_ivs *ivs = LOOP_IVS (loop);
8602 rtx reg = bl->biv->dest_reg;
8603 rtx p;
8605 /* Scan all insns in the loop, stopping if we find one that uses the
8606 biv in a way that we cannot eliminate. */
8608 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8610 enum rtx_code code = GET_CODE (p);
8611 basic_block where_bb = 0;
8612 rtx where_insn = threshold >= insn_count ? 0 : p;
8613 rtx note;
8615 /* If this is a libcall that sets a giv, skip ahead to its end. */
8616 if (GET_RTX_CLASS (code) == 'i')
8618 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8620 if (note)
8622 rtx last = XEXP (note, 0);
8623 rtx set = single_set (last);
8625 if (set && GET_CODE (SET_DEST (set)) == REG)
8627 unsigned int regno = REGNO (SET_DEST (set));
8629 if (regno < ivs->n_regs
8630 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8631 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8632 p = last;
8637 /* Closely examine the insn if the biv is mentioned. */
8638 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8639 && reg_mentioned_p (reg, PATTERN (p))
8640 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8641 eliminate_p, where_bb, where_insn))
8643 if (loop_dump_stream)
8644 fprintf (loop_dump_stream,
8645 "Cannot eliminate biv %d: biv used in insn %d.\n",
8646 bl->regno, INSN_UID (p));
8647 break;
8650 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8651 if (eliminate_p
8652 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8653 && reg_mentioned_p (reg, XEXP (note, 0)))
8654 remove_note (p, note);
8657 if (p == loop->end)
8659 if (loop_dump_stream)
8660 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8661 bl->regno, eliminate_p ? "was" : "can be");
8662 return 1;
8665 return 0;
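/* Illustrative example of a successful elimination (names hypothetical):
   if the only remaining use of biv I is the exit test "I < N" and a
   reduced giv P = BASE + I * 4 exists, the test can be rewritten as
   "P < BASE + N * 4", with the right-hand side computed outside the
   insn (before it, or at the loop start); I then sets nothing the loop
   still needs.  The actual rewriting is done by maybe_eliminate_biv_1
   below.  */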
8668 /* INSN and REFERENCE are instructions in the same insn chain.
8669 Return nonzero if INSN is first. */
8671 int
8672 loop_insn_first_p (insn, reference)
8673 rtx insn, reference;
8675 rtx p, q;
8677 for (p = insn, q = reference;;)
8679 /* Start with test for not first so that INSN == REFERENCE yields not
8680 first. */
8681 if (q == insn || ! p)
8682 return 0;
8683 if (p == reference || ! q)
8684 return 1;
8686 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8687 previous insn, hence the <= comparison below does not work if
8688 P is a note. */
8689 if (INSN_UID (p) < max_uid_for_loop
8690 && INSN_UID (q) < max_uid_for_loop
8691 && GET_CODE (p) != NOTE)
8692 return INSN_LUID (p) <= INSN_LUID (q);
8694 if (INSN_UID (p) >= max_uid_for_loop
8695 || GET_CODE (p) == NOTE)
8696 p = NEXT_INSN (p);
8697 if (INSN_UID (q) >= max_uid_for_loop)
8698 q = NEXT_INSN (q);
8702 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8703 the offset that we have to take into account due to auto-increment /
8704 giv derivation is zero. */
8705 static int
8706 biv_elimination_giv_has_0_offset (biv, giv, insn)
8707 struct induction *biv, *giv;
8708 rtx insn;
8710 /* If the giv V had the auto-inc address optimization applied
8711 to it, and INSN occurs between the giv insn and the biv
8712 insn, then we'd have to adjust the value used here.
8713 This is rare, so we don't bother to make this possible. */
8714 if (giv->auto_inc_opt
8715 && ((loop_insn_first_p (giv->insn, insn)
8716 && loop_insn_first_p (insn, biv->insn))
8717 || (loop_insn_first_p (biv->insn, insn)
8718 && loop_insn_first_p (insn, giv->insn))))
8719 return 0;
8721 return 1;
8724 /* If BL appears in X (part of the pattern of INSN), see if we can
8725 eliminate its use. If so, return 1. If not, return 0.
8727 If BIV does not appear in X, return 1.
8729 If ELIMINATE_P is nonzero, actually do the elimination.
8730 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8731 Depending on how many items have been moved out of the loop, it
8732 will either be before INSN (when WHERE_INSN is nonzero) or at the
8733 start of the loop (when WHERE_INSN is zero). */
8735 static int
8736 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8737 const struct loop *loop;
8738 rtx x, insn;
8739 struct iv_class *bl;
8740 int eliminate_p;
8741 basic_block where_bb;
8742 rtx where_insn;
8744 enum rtx_code code = GET_CODE (x);
8745 rtx reg = bl->biv->dest_reg;
8746 enum machine_mode mode = GET_MODE (reg);
8747 struct induction *v;
8748 rtx arg, tem;
8749 #ifdef HAVE_cc0
8750 rtx new;
8751 #endif
8752 int arg_operand;
8753 const char *fmt;
8754 int i, j;
8756 switch (code)
8758 case REG:
8759 /* If we haven't already been able to do something with this BIV,
8760 we can't eliminate it. */
8761 if (x == reg)
8762 return 0;
8763 return 1;
8765 case SET:
8766 /* If this sets the BIV, it is not a problem. */
8767 if (SET_DEST (x) == reg)
8768 return 1;
8770 /* If this is an insn that defines a giv, it is also ok because
8771 it will go away when the giv is reduced. */
8772 for (v = bl->giv; v; v = v->next_iv)
8773 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8774 return 1;
8776 #ifdef HAVE_cc0
8777 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8779 /* Can replace with any giv that was reduced and
8780 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8781 Require a constant for MULT_VAL, so we know it's nonzero.
8782 ??? We disable this optimization to avoid potential
8783 overflows. */
8785 for (v = bl->giv; v; v = v->next_iv)
8786 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8787 && v->add_val == const0_rtx
8788 && ! v->ignore && ! v->maybe_dead && v->always_computable
8789 && v->mode == mode
8790 && 0)
8792 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8793 continue;
8795 if (! eliminate_p)
8796 return 1;
8798 /* If the giv has the opposite direction of change,
8799 then reverse the comparison. */
8800 if (INTVAL (v->mult_val) < 0)
8801 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8802 const0_rtx, v->new_reg);
8803 else
8804 new = v->new_reg;
8806 /* We can probably test that giv's reduced reg. */
8807 if (validate_change (insn, &SET_SRC (x), new, 0))
8808 return 1;
8811 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8812 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8813 Require a constant for MULT_VAL, so we know it's nonzero.
8814 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8815 overflow problem. */
8817 for (v = bl->giv; v; v = v->next_iv)
8818 if (GET_CODE (v->mult_val) == CONST_INT
8819 && v->mult_val != const0_rtx
8820 && ! v->ignore && ! v->maybe_dead && v->always_computable
8821 && v->mode == mode
8822 && (GET_CODE (v->add_val) == SYMBOL_REF
8823 || GET_CODE (v->add_val) == LABEL_REF
8824 || GET_CODE (v->add_val) == CONST
8825 || (GET_CODE (v->add_val) == REG
8826 && REG_POINTER (v->add_val))))
8828 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8829 continue;
8831 if (! eliminate_p)
8832 return 1;
8834 /* If the giv has the opposite direction of change,
8835 then reverse the comparison. */
8836 if (INTVAL (v->mult_val) < 0)
8837 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8838 v->new_reg);
8839 else
8840 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8841 copy_rtx (v->add_val));
8843 /* Replace biv with the giv's reduced register. */
8844 update_reg_last_use (v->add_val, insn);
8845 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8846 return 1;
8848 /* Insn doesn't support that constant or invariant. Copy it
8849 into a register (it will be a loop invariant.) */
8850 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8852 loop_insn_emit_before (loop, 0, where_insn,
8853 gen_move_insn (tem,
8854 copy_rtx (v->add_val)));
8856 /* Substitute the new register for its invariant value in
8857 the compare expression. */
8858 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8859 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8860 return 1;
8863 #endif
8864 break;
8866 case COMPARE:
8867 case EQ: case NE:
8868 case GT: case GE: case GTU: case GEU:
8869 case LT: case LE: case LTU: case LEU:
8870 /* See if either argument is the biv. */
8871 if (XEXP (x, 0) == reg)
8872 arg = XEXP (x, 1), arg_operand = 1;
8873 else if (XEXP (x, 1) == reg)
8874 arg = XEXP (x, 0), arg_operand = 0;
8875 else
8876 break;
8878 if (CONSTANT_P (arg))
8880 /* First try to replace with any giv that has constant positive
8881 mult_val and constant add_val. We might be able to support
8882 negative mult_val, but it seems complex to do it in general. */
8884 for (v = bl->giv; v; v = v->next_iv)
8885 if (GET_CODE (v->mult_val) == CONST_INT
8886 && INTVAL (v->mult_val) > 0
8887 && (GET_CODE (v->add_val) == SYMBOL_REF
8888 || GET_CODE (v->add_val) == LABEL_REF
8889 || GET_CODE (v->add_val) == CONST
8890 || (GET_CODE (v->add_val) == REG
8891 && REG_POINTER (v->add_val)))
8892 && ! v->ignore && ! v->maybe_dead && v->always_computable
8893 && v->mode == mode)
8895 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8896 continue;
8898 /* Don't eliminate if the linear combination that makes up
8899 the giv overflows when it is applied to ARG. */
8900 if (GET_CODE (arg) == CONST_INT)
8902 rtx add_val;
8904 if (GET_CODE (v->add_val) == CONST_INT)
8905 add_val = v->add_val;
8906 else
8907 add_val = const0_rtx;
8909 if (const_mult_add_overflow_p (arg, v->mult_val,
8910 add_val, mode, 1))
8911 continue;
8914 if (! eliminate_p)
8915 return 1;
8917 /* Replace biv with the giv's reduced reg. */
8918 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8920 /* If all constants are actually constant integers and
8921 the derived constant can be directly placed in the COMPARE,
8922 do so. */
8923 if (GET_CODE (arg) == CONST_INT
8924 && GET_CODE (v->add_val) == CONST_INT)
8926 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8927 v->add_val, mode, 1);
8929 else
8931 /* Otherwise, load it into a register. */
8932 tem = gen_reg_rtx (mode);
8933 loop_iv_add_mult_emit_before (loop, arg,
8934 v->mult_val, v->add_val,
8935 tem, where_bb, where_insn);
8938 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8940 if (apply_change_group ())
8941 return 1;
8944 /* Look for giv with positive constant mult_val and nonconst add_val.
8945 Insert insns to calculate new compare value.
8946 ??? Turn this off due to possible overflow. */
8948 for (v = bl->giv; v; v = v->next_iv)
8949 if (GET_CODE (v->mult_val) == CONST_INT
8950 && INTVAL (v->mult_val) > 0
8951 && ! v->ignore && ! v->maybe_dead && v->always_computable
8952 && v->mode == mode
8953 && 0)
8955 rtx tem;
8957 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8958 continue;
8960 if (! eliminate_p)
8961 return 1;
8963 tem = gen_reg_rtx (mode);
8965 /* Replace biv with giv's reduced register. */
8966 validate_change (insn, &XEXP (x, 1 - arg_operand),
8967 v->new_reg, 1);
8969 /* Compute value to compare against. */
8970 loop_iv_add_mult_emit_before (loop, arg,
8971 v->mult_val, v->add_val,
8972 tem, where_bb, where_insn);
8973 /* Use it in this insn. */
8974 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8975 if (apply_change_group ())
8976 return 1;
8979 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8981 if (loop_invariant_p (loop, arg) == 1)
8983 /* Look for giv with constant positive mult_val and nonconst
8984 add_val. Insert insns to compute new compare value.
8985 ??? Turn this off due to possible overflow. */
8987 for (v = bl->giv; v; v = v->next_iv)
8988 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8989 && ! v->ignore && ! v->maybe_dead && v->always_computable
8990 && v->mode == mode
8991 && 0)
8993 rtx tem;
8995 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8996 continue;
8998 if (! eliminate_p)
8999 return 1;
9001 tem = gen_reg_rtx (mode);
9003 /* Replace biv with giv's reduced register. */
9004 validate_change (insn, &XEXP (x, 1 - arg_operand),
9005 v->new_reg, 1);
9007 /* Compute value to compare against. */
9008 loop_iv_add_mult_emit_before (loop, arg,
9009 v->mult_val, v->add_val,
9010 tem, where_bb, where_insn);
9011 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
9012 if (apply_change_group ())
9013 return 1;
9017 /* This code has problems. Basically, when checking whether
9018 we will eliminate BL, we can't know whether a particular giv
9019 of ARG will be reduced. If it isn't going to be reduced,
9020 we can't eliminate BL. We can try forcing it to be reduced,
9021 but that can generate poor code.
9023 The problem is that the benefit of reducing TV, below should
9024 be increased if BL can actually be eliminated, but this means
9025 we might have to do a topological sort of the order in which
9026 we try to process biv. It doesn't seem worthwhile to do
9027 this sort of thing now. */
9029 #if 0
9030 /* Otherwise the reg compared with had better be a biv. */
9031 if (GET_CODE (arg) != REG
9032 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9033 return 0;
9035 /* Look for a pair of givs, one for each biv,
9036 with identical coefficients. */
9037 for (v = bl->giv; v; v = v->next_iv)
9039 struct induction *tv;
9041 if (v->ignore || v->maybe_dead || v->mode != mode)
9042 continue;
9044 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9045 tv = tv->next_iv)
9046 if (! tv->ignore && ! tv->maybe_dead
9047 && rtx_equal_p (tv->mult_val, v->mult_val)
9048 && rtx_equal_p (tv->add_val, v->add_val)
9049 && tv->mode == mode)
9051 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9052 continue;
9054 if (! eliminate_p)
9055 return 1;
9057 /* Replace biv with its giv's reduced reg. */
9058 XEXP (x, 1 - arg_operand) = v->new_reg;
9059 /* Replace other operand with the other giv's
9060 reduced reg. */
9061 XEXP (x, arg_operand) = tv->new_reg;
9062 return 1;
9065 #endif
9068 /* If we get here, the biv can't be eliminated. */
9069 return 0;
9071 case MEM:
9072 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9073 biv is used in it, since it will be replaced. */
9074 for (v = bl->giv; v; v = v->next_iv)
9075 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9076 return 1;
9077 break;
9079 default:
9080 break;
9083 /* See if any subexpression fails elimination. */
9084 fmt = GET_RTX_FORMAT (code);
9085 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9087 switch (fmt[i])
9089 case 'e':
9090 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9091 eliminate_p, where_bb, where_insn))
9092 return 0;
9093 break;
9095 case 'E':
9096 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9097 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9098 eliminate_p, where_bb, where_insn))
9099 return 0;
9100 break;
9104 return 1;
9107 /* Return nonzero if the last use of REG
9108 is in an insn following INSN in the same basic block. */
9110 static int
9111 last_use_this_basic_block (reg, insn)
9112 rtx reg;
9113 rtx insn;
9115 rtx n;
9116 for (n = insn;
9117 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9118 n = NEXT_INSN (n))
9120 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9121 return 1;
9123 return 0;
9126 /* Called via `note_stores' to record the initial value of a biv. Here we
9127 just record the location of the set and process it later. */
9129 static void
9130 record_initial (dest, set, data)
9131 rtx dest;
9132 rtx set;
9133 void *data ATTRIBUTE_UNUSED;
9135 struct loop_ivs *ivs = (struct loop_ivs *) data;
9136 struct iv_class *bl;
9138 if (GET_CODE (dest) != REG
9139 || REGNO (dest) >= ivs->n_regs
9140 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9141 return;
9143 bl = REG_IV_CLASS (ivs, REGNO (dest));
9145 /* If this is the first set found, record it. */
9146 if (bl->init_insn == 0)
9148 bl->init_insn = note_insn;
9149 bl->init_set = set;
9153 /* If any of the registers in X are "old" and currently have a last use earlier
9154 than INSN, update them to have a last use of INSN. Their actual last use
9155 will be the previous insn but it will not have a valid uid_luid so we can't
9156 use it. X must be a source expression only. */
9158 static void
9159 update_reg_last_use (x, insn)
9160 rtx x;
9161 rtx insn;
9163 /* Check for the case where INSN does not have a valid luid. In this case,
9164 there is no need to modify the regno_last_uid, as this can only happen
9165 when code is inserted after the loop_end to set a pseudo's final value,
9166 and hence this insn will never be the last use of x.
9167 ???? This comment is not correct. See for example loop_givs_reduce.
9168 This may insert an insn before another new insn. */
9169 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9170 && INSN_UID (insn) < max_uid_for_loop
9171 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9173 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9175 else
9177 int i, j;
9178 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9179 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9181 if (fmt[i] == 'e')
9182 update_reg_last_use (XEXP (x, i), insn);
9183 else if (fmt[i] == 'E')
9184 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9185 update_reg_last_use (XVECEXP (x, i, j), insn);
9190 /* Given an insn INSN and condition COND, return the condition in a
9191 canonical form to simplify testing by callers. Specifically:
9193 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9194 (2) Both operands will be machine operands; (cc0) will have been replaced.
9195 (3) If an operand is a constant, it will be the second operand.
9196 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9197 for GE, GEU, and LEU.
9199 If the condition cannot be understood, or is an inequality floating-point
9200 comparison which needs to be reversed, 0 will be returned.
9202 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9204 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9205 insn used in locating the condition was found. If a replacement test
9206 of the condition is desired, it should be placed in front of that
9207 insn and we will be sure that the inputs are still valid.
9209 If WANT_REG is nonzero, we wish the condition to be relative to that
9210 register, if possible. Therefore, do not canonicalize the condition
9211 further. */
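/* Worked example (values illustrative): given the condition
   (LT (const_int 4) (reg X)), the constant is moved to the second
   operand and the code swapped, yielding (GT (reg X) (const_int 4));
   per rule (4), (LE (reg X) (const_int 4)) would instead come back as
   (LT (reg X) (const_int 5)).  If the comparison is against cc0, the
   real COMPARE is recovered from the preceding insn and *EARLIEST is
   moved back to that insn.  */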
9213 rtx
9214 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9215 rtx insn;
9216 rtx cond;
9217 int reverse;
9218 rtx *earliest;
9219 rtx want_reg;
9221 enum rtx_code code;
9222 rtx prev = insn;
9223 rtx set;
9224 rtx tem;
9225 rtx op0, op1;
9226 int reverse_code = 0;
9227 enum machine_mode mode;
9229 code = GET_CODE (cond);
9230 mode = GET_MODE (cond);
9231 op0 = XEXP (cond, 0);
9232 op1 = XEXP (cond, 1);
9234 if (reverse)
9235 code = reversed_comparison_code (cond, insn);
9236 if (code == UNKNOWN)
9237 return 0;
9239 if (earliest)
9240 *earliest = insn;
9242 /* If we are comparing a register with zero, see if the register is set
9243 in the previous insn to a COMPARE or a comparison operation. Perform
9244 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9245 in cse.c */
9247 while (GET_RTX_CLASS (code) == '<'
9248 && op1 == CONST0_RTX (GET_MODE (op0))
9249 && op0 != want_reg)
9251 /* Set nonzero when we find something of interest. */
9252 rtx x = 0;
9254 #ifdef HAVE_cc0
9255 /* If comparison with cc0, import actual comparison from compare
9256 insn. */
9257 if (op0 == cc0_rtx)
9259 if ((prev = prev_nonnote_insn (prev)) == 0
9260 || GET_CODE (prev) != INSN
9261 || (set = single_set (prev)) == 0
9262 || SET_DEST (set) != cc0_rtx)
9263 return 0;
9265 op0 = SET_SRC (set);
9266 op1 = CONST0_RTX (GET_MODE (op0));
9267 if (earliest)
9268 *earliest = prev;
9270 #endif
9272 /* If this is a COMPARE, pick up the two things being compared. */
9273 if (GET_CODE (op0) == COMPARE)
9275 op1 = XEXP (op0, 1);
9276 op0 = XEXP (op0, 0);
9277 continue;
9279 else if (GET_CODE (op0) != REG)
9280 break;
9282 /* Go back to the previous insn. Stop if it is not an INSN. We also
9283 stop if it isn't a single set or if it has a REG_INC note because
9284 we don't want to bother dealing with it. */
9286 if ((prev = prev_nonnote_insn (prev)) == 0
9287 || GET_CODE (prev) != INSN
9288 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9289 break;
9291 set = set_of (op0, prev);
9293 if (set
9294 && (GET_CODE (set) != SET
9295 || !rtx_equal_p (SET_DEST (set), op0)))
9296 break;
9298 /* If this is setting OP0, get what it sets it to if it looks
9299 relevant. */
9300 if (set)
9302 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9303 #ifdef FLOAT_STORE_FLAG_VALUE
9304 REAL_VALUE_TYPE fsfv;
9305 #endif
9307 /* ??? We may not combine comparisons done in a CCmode with
9308 comparisons not done in a CCmode. This is to aid targets
9309 like Alpha that have an IEEE compliant EQ instruction, and
9310 a non-IEEE compliant BEQ instruction. The use of CCmode is
9311 actually artificial, simply to prevent the combination, but
9312 should not affect other platforms.
9314 However, we must allow VOIDmode comparisons to match either
9315 CCmode or non-CCmode comparison, because some ports have
9316 modeless comparisons inside branch patterns.
9318 ??? This mode check should perhaps look more like the mode check
9319 in simplify_comparison in combine. */
9321 if ((GET_CODE (SET_SRC (set)) == COMPARE
9322 || (((code == NE
9323 || (code == LT
9324 && GET_MODE_CLASS (inner_mode) == MODE_INT
9325 && (GET_MODE_BITSIZE (inner_mode)
9326 <= HOST_BITS_PER_WIDE_INT)
9327 && (STORE_FLAG_VALUE
9328 & ((HOST_WIDE_INT) 1
9329 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9330 #ifdef FLOAT_STORE_FLAG_VALUE
9331 || (code == LT
9332 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9333 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9334 REAL_VALUE_NEGATIVE (fsfv)))
9335 #endif
9337 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9338 && (((GET_MODE_CLASS (mode) == MODE_CC)
9339 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9340 || mode == VOIDmode || inner_mode == VOIDmode))
9341 x = SET_SRC (set);
9342 else if (((code == EQ
9343 || (code == GE
9344 && (GET_MODE_BITSIZE (inner_mode)
9345 <= HOST_BITS_PER_WIDE_INT)
9346 && GET_MODE_CLASS (inner_mode) == MODE_INT
9347 && (STORE_FLAG_VALUE
9348 & ((HOST_WIDE_INT) 1
9349 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9350 #ifdef FLOAT_STORE_FLAG_VALUE
9351 || (code == GE
9352 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9353 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9354 REAL_VALUE_NEGATIVE (fsfv)))
9355 #endif
9357 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9358 && (((GET_MODE_CLASS (mode) == MODE_CC)
9359 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9360 || mode == VOIDmode || inner_mode == VOIDmode))
9363 reverse_code = 1;
9364 x = SET_SRC (set);
9366 else
9367 break;
9370 else if (reg_set_p (op0, prev))
9371 /* If this sets OP0, but not directly, we have to give up. */
9372 break;
9374 if (x)
9376 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9377 code = GET_CODE (x);
9378 if (reverse_code)
9380 code = reversed_comparison_code (x, prev);
9381 if (code == UNKNOWN)
9382 return 0;
9383 reverse_code = 0;
9386 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9387 if (earliest)
9388 *earliest = prev;
9392 /* If constant is first, put it last. */
9393 if (CONSTANT_P (op0))
9394 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9396 /* If OP0 is the result of a comparison, we weren't able to find what
9397 was really being compared, so fail. */
9398 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9399 return 0;
9401 /* Canonicalize any ordered comparison with integers involving equality
9402 if we can do computations in the relevant mode and we do not
9403 overflow. */
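/* For instance, with a 32-bit OP0, (le op0 (const_int 4)) becomes
   (lt op0 (const_int 5)) and (geu op0 (const_int 1)) becomes
   (gtu op0 (const_int 0)); LE of the most positive value and GEU of
   zero are left alone (similarly for GE and LEU at their extremes),
   since the adjusted constant would wrap.  */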
9405 if (GET_CODE (op1) == CONST_INT
9406 && GET_MODE (op0) != VOIDmode
9407 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9409 HOST_WIDE_INT const_val = INTVAL (op1);
9410 unsigned HOST_WIDE_INT uconst_val = const_val;
9411 unsigned HOST_WIDE_INT max_val
9412 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9414 switch (code)
9416 case LE:
9417 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9418 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9419 break;
9421 /* When cross-compiling, const_val might be sign-extended from
9422 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9423 case GE:
9424 if ((HOST_WIDE_INT) (const_val & max_val)
9425 != (((HOST_WIDE_INT) 1
9426 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9427 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9428 break;
9430 case LEU:
9431 if (uconst_val < max_val)
9432 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9433 break;
9435 case GEU:
9436 if (uconst_val != 0)
9437 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9438 break;
9440 default:
9441 break;
9445 #ifdef HAVE_cc0
9446 /* Never return CC0; return zero instead. */
9447 if (op0 == cc0_rtx)
9448 return 0;
9449 #endif
9451 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9454 /* Given a jump insn JUMP, return the condition that will cause it to branch
9455 to its JUMP_LABEL. If the condition cannot be understood, or is an
9456 inequality floating-point comparison which needs to be reversed, 0 will
9457 be returned.
9459 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9460 insn used in locating the condition was found. If a replacement test
9461 of the condition is desired, it should be placed in front of that
9462 insn and we will be sure that the inputs are still valid. */
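/* For example, for a conditional jump whose pattern is

       (set (pc) (if_then_else (ne (reg 60) (const_int 0))
                               (label_ref 23)
                               (pc)))

   this returns the canonicalized form of (ne (reg 60) (const_int 0));
   had the label_ref appeared in the "else" arm instead, the condition
   would first be reversed.  Register and label numbers are purely
   illustrative.  */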
9464 rtx
9465 get_condition (jump, earliest)
9466 rtx jump;
9467 rtx *earliest;
9469 rtx cond;
9470 int reverse;
9471 rtx set;
9473 /* If this is not a standard conditional jump, we can't parse it. */
9474 if (GET_CODE (jump) != JUMP_INSN
9475 || ! any_condjump_p (jump))
9476 return 0;
9477 set = pc_set (jump);
9479 cond = XEXP (SET_SRC (set), 0);
9481 /* If this branches to JUMP_LABEL when the condition is false, reverse
9482 the condition. */
9483 reverse
9484 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9485 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9487 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9490 /* Similar to above routine, except that we also put an invariant last
9491 unless both operands are invariants. */
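/* For instance, if X branches on (gt (reg 3) (reg 100)) and only
   (reg 3) is invariant in LOOP, the comparison is rewritten as
   (lt (reg 100) (reg 3)) so that the invariant operand comes last.
   The register numbers are purely illustrative.  */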
9493 rtx
9494 get_condition_for_loop (loop, x)
9495 const struct loop *loop;
9496 rtx x;
9498 rtx comparison = get_condition (x, (rtx*) 0);
9500 if (comparison == 0
9501 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9502 || loop_invariant_p (loop, XEXP (comparison, 1)))
9503 return comparison;
9505 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9506 XEXP (comparison, 1), XEXP (comparison, 0));
9509 /* Scan the function and determine whether it has indirect (computed) jumps.
9511 This is taken mostly from flow.c; similar code exists elsewhere
9512 in the compiler. It may be useful to put this into rtlanal.c. */
9513 static int
9514 indirect_jump_in_function_p (start)
9515 rtx start;
9517 rtx insn;
9519 for (insn = start; insn; insn = NEXT_INSN (insn))
9520 if (computed_jump_p (insn))
9521 return 1;
9523 return 0;
9526 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9527 documentation for LOOP_MEMS for the definition of `appropriate'.
9528 This function is called from prescan_loop via for_each_rtx. */
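/* As with any for_each_rtx callback, returning -1 below means "do not
   traverse into this subexpression" (used for CLOBBERs, CONST_DOUBLEs
   and note EXPR_LISTs, whose MEMs are of no interest here), while
   returning 0 lets the walk continue normally.  */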
9530 static int
9531 insert_loop_mem (mem, data)
9532 rtx *mem;
9533 void *data ATTRIBUTE_UNUSED;
9535 struct loop_info *loop_info = data;
9536 int i;
9537 rtx m = *mem;
9539 if (m == NULL_RTX)
9540 return 0;
9542 switch (GET_CODE (m))
9544 case MEM:
9545 break;
9547 case CLOBBER:
9548 /* We're not interested in MEMs that are only clobbered. */
9549 return -1;
9551 case CONST_DOUBLE:
9552 /* We're not interested in the MEM associated with a
9553 CONST_DOUBLE, so there's no need to traverse into this. */
9554 return -1;
9556 case EXPR_LIST:
9557 /* We're not interested in any MEMs that only appear in notes. */
9558 return -1;
9560 default:
9561 /* This is not a MEM. */
9562 return 0;
9565 /* See if we've already seen this MEM. */
9566 for (i = 0; i < loop_info->mems_idx; ++i)
9567 if (rtx_equal_p (m, loop_info->mems[i].mem))
9569 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9570 /* The modes of the two memory accesses are different. If
9571 this happens, something tricky is going on, and we just
9572 don't optimize accesses to this MEM. */
9573 loop_info->mems[i].optimize = 0;
9575 return 0;
9578 /* Resize the array, if necessary. */
9579 if (loop_info->mems_idx == loop_info->mems_allocated)
9581 if (loop_info->mems_allocated != 0)
9582 loop_info->mems_allocated *= 2;
9583 else
9584 loop_info->mems_allocated = 32;
9586 loop_info->mems = (loop_mem_info *)
9587 xrealloc (loop_info->mems,
9588 loop_info->mems_allocated * sizeof (loop_mem_info));
9591 /* Actually insert the MEM. */
9592 loop_info->mems[loop_info->mems_idx].mem = m;
9593 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9594 because we can't put it in a register. We still store it in the
9595 table, though, so that if we see the same address later, but in a
9596 non-BLK mode, we'll not think we can optimize it at that point. */
9597 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9598 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9599 ++loop_info->mems_idx;
9601 return 0;
9605 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9607 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9608 register that is modified by an insn between FROM and TO. If the
9609 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9610 more, stop incrementing it, to avoid overflow.
9612 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9613 register I is used, if it is only used once. Otherwise, it is set
9614 to 0 (for no uses) or const0_rtx for more than one use. This
9615 parameter may be zero, in which case this processing is not done.
9617 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9618 optimize register I. */
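/* For example, a pseudo that appears in the loop only as the source of
   a single move ends up with SET_IN_LOOP == 0 and SINGLE_USAGE
   pointing at that move insn, while a call-clobbered hard register
   (other than a function-invariant PIC register) in a loop containing
   a call ends up with MAY_NOT_OPTIMIZE == 1 and SET_IN_LOOP == 1.  */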
9620 static void
9621 loop_regs_scan (loop, extra_size)
9622 const struct loop *loop;
9623 int extra_size;
9625 struct loop_regs *regs = LOOP_REGS (loop);
9626 int old_nregs;
9627 /* last_set[n] is nonzero iff reg n has been set in the current
9628 basic block. In that case, it is the insn that last set reg n. */
9629 rtx *last_set;
9630 rtx insn;
9631 int i;
9633 old_nregs = regs->num;
9634 regs->num = max_reg_num ();
9636 /* Grow the regs array if not allocated or too small. */
9637 if (regs->num >= regs->size)
9639 regs->size = regs->num + extra_size;
9641 regs->array = (struct loop_reg *)
9642 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9644 /* Zero the new elements. */
9645 memset (regs->array + old_nregs, 0,
9646 (regs->size - old_nregs) * sizeof (*regs->array));
9649 /* Clear previously scanned fields but do not clear n_times_set. */
9650 for (i = 0; i < old_nregs; i++)
9652 regs->array[i].set_in_loop = 0;
9653 regs->array[i].may_not_optimize = 0;
9654 regs->array[i].single_usage = NULL_RTX;
9657 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9659 /* Scan the loop, recording register usage. */
9660 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9661 insn = NEXT_INSN (insn))
9663 if (INSN_P (insn))
9665 /* Record registers that have exactly one use. */
9666 find_single_use_in_loop (regs, insn, PATTERN (insn));
9668 /* Include uses in REG_EQUAL notes. */
9669 if (REG_NOTES (insn))
9670 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9672 if (GET_CODE (PATTERN (insn)) == SET
9673 || GET_CODE (PATTERN (insn)) == CLOBBER)
9674 count_one_set (regs, insn, PATTERN (insn), last_set);
9675 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9677 int i;
9678 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9679 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9680 last_set);
9684 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9685 memset (last_set, 0, regs->num * sizeof (rtx));
9687 /* Invalidate all registers used for function argument passing.
9688 We check rtx_varies_p for the same reason as below, to allow
9689 optimizing PIC calculations. */
9690 if (GET_CODE (insn) == CALL_INSN)
9692 rtx link;
9693 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9694 link;
9695 link = XEXP (link, 1))
9697 rtx op, reg;
9699 if (GET_CODE (op = XEXP (link, 0)) == USE
9700 && GET_CODE (reg = XEXP (op, 0)) == REG
9701 && rtx_varies_p (reg, 1))
9702 regs->array[REGNO (reg)].may_not_optimize = 1;
9707 /* Invalidate all hard registers clobbered by calls. With one exception:
9708 a call-clobbered PIC register is still function-invariant for our
9709 purposes, since we can hoist any PIC calculations out of the loop.
9710 Thus the call to rtx_varies_p. */
9711 if (LOOP_INFO (loop)->has_call)
9712 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9713 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9714 && rtx_varies_p (regno_reg_rtx[i], 1))
9716 regs->array[i].may_not_optimize = 1;
9717 regs->array[i].set_in_loop = 1;
9720 #ifdef AVOID_CCMODE_COPIES
9721 /* Don't try to move insns which set CC registers if we should not
9722 create CCmode register copies. */
9723 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9724 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9725 regs->array[i].may_not_optimize = 1;
9726 #endif
9728 /* Set regs->array[I].n_times_set for the new registers. */
9729 for (i = old_nregs; i < regs->num; i++)
9730 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9732 free (last_set);
9735 /* Returns the number of real INSNs in the LOOP. */
9737 static int
9738 count_insns_in_loop (loop)
9739 const struct loop *loop;
9741 int count = 0;
9742 rtx insn;
9744 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9745 insn = NEXT_INSN (insn))
9746 if (INSN_P (insn))
9747 ++count;
9749 return count;
9752 /* Move MEMs into registers for the duration of the loop. */
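/* For a loop that repeatedly reads a MEM whose address is loop
   invariant, e.g. (hypothetically)

       for (i = 0; i < n; i++)
         sum += *p;

   the MEM for *p is loaded into a fresh pseudo in the loop pre-header,
   references to it inside the loop are replaced by that pseudo, and,
   if the location is also written, its value is stored back to memory
   after the loop end -- assuming the aliasing and trap checks below
   allow it.  */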
9754 static void
9755 load_mems (loop)
9756 const struct loop *loop;
9758 struct loop_info *loop_info = LOOP_INFO (loop);
9759 struct loop_regs *regs = LOOP_REGS (loop);
9760 int maybe_never = 0;
9761 int i;
9762 rtx p, prev_ebb_head;
9763 rtx label = NULL_RTX;
9764 rtx end_label;
9765 /* Nonzero if the next instruction may never be executed. */
9766 int next_maybe_never = 0;
9767 unsigned int last_max_reg = max_reg_num ();
9769 if (loop_info->mems_idx == 0)
9770 return;
9772 /* We cannot use next_label here because it skips over normal insns. */
9773 end_label = next_nonnote_insn (loop->end);
9774 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9775 end_label = NULL_RTX;
9777 /* Check to see if it's possible that some instructions in the loop are
9778 never executed. Also check if there is a goto out of the loop other
9779 than right after the end of the loop. */
9780 for (p = next_insn_in_loop (loop, loop->scan_start);
9781 p != NULL_RTX;
9782 p = next_insn_in_loop (loop, p))
9784 if (GET_CODE (p) == CODE_LABEL)
9785 maybe_never = 1;
9786 else if (GET_CODE (p) == JUMP_INSN
9787 /* If we enter the loop in the middle, and scan
9788 around to the beginning, don't set maybe_never
9789 for that. This must be an unconditional jump,
9790 otherwise the code at the top of the loop might
9791 never be executed. Unconditional jumps are
9792 followed by a barrier and then the loop end. */
9793 && ! (GET_CODE (p) == JUMP_INSN
9794 && JUMP_LABEL (p) == loop->top
9795 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9796 && any_uncondjump_p (p)))
9798 /* If this is a jump outside of the loop but not right
9799 after the end of the loop, we would have to emit new fixup
9800 sequences for each such label. */
9801 if (/* If we can't tell where control might go when this
9802 JUMP_INSN is executed, we must be conservative. */
9803 !JUMP_LABEL (p)
9804 || (JUMP_LABEL (p) != end_label
9805 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9806 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9807 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9808 return;
9810 if (!any_condjump_p (p))
9811 /* Something complicated. */
9812 maybe_never = 1;
9813 else
9814 /* If there are any more instructions in the loop, they
9815 might not be reached. */
9816 next_maybe_never = 1;
9818 else if (next_maybe_never)
9819 maybe_never = 1;
9822 /* Find start of the extended basic block that enters the loop. */
9823 for (p = loop->start;
9824 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9825 p = PREV_INSN (p))
9827 prev_ebb_head = p;
9829 cselib_init ();
9831 /* Build table of mems that get set to constant values before the
9832 loop. */
9833 for (; p != loop->start; p = NEXT_INSN (p))
9834 cselib_process_insn (p);
9836 /* Actually move the MEMs. */
9837 for (i = 0; i < loop_info->mems_idx; ++i)
9839 regset_head load_copies;
9840 regset_head store_copies;
9841 int written = 0;
9842 rtx reg;
9843 rtx mem = loop_info->mems[i].mem;
9844 rtx mem_list_entry;
9846 if (MEM_VOLATILE_P (mem)
9847 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9848 /* There's no telling whether or not MEM is modified. */
9849 loop_info->mems[i].optimize = 0;
9851 /* Go through the MEMs written to in the loop to see if this
9852 one is aliased by one of them. */
9853 mem_list_entry = loop_info->store_mems;
9854 while (mem_list_entry)
9856 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9857 written = 1;
9858 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9859 mem, rtx_varies_p))
9861 /* MEM is indeed aliased by this store. */
9862 loop_info->mems[i].optimize = 0;
9863 break;
9865 mem_list_entry = XEXP (mem_list_entry, 1);
9868 if (flag_float_store && written
9869 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9870 loop_info->mems[i].optimize = 0;
9872 /* If this MEM is written to, we must be sure that there
9873 are no reads from another MEM that aliases this one. */
9874 if (loop_info->mems[i].optimize && written)
9876 int j;
9878 for (j = 0; j < loop_info->mems_idx; ++j)
9880 if (j == i)
9881 continue;
9882 else if (true_dependence (mem,
9883 VOIDmode,
9884 loop_info->mems[j].mem,
9885 rtx_varies_p))
9887 /* It's not safe to hoist loop_info->mems[i] out of
9888 the loop because writes to it might not be
9889 seen by reads from loop_info->mems[j]. */
9890 loop_info->mems[i].optimize = 0;
9891 break;
9896 if (maybe_never && may_trap_p (mem))
9897 /* We can't access the MEM outside the loop; it might
9898 cause a trap that wouldn't have happened otherwise. */
9899 loop_info->mems[i].optimize = 0;
9901 if (!loop_info->mems[i].optimize)
9902 /* We thought we were going to lift this MEM out of the
9903 loop, but later discovered that we could not. */
9904 continue;
9906 INIT_REG_SET (&load_copies);
9907 INIT_REG_SET (&store_copies);
9909 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9910 order to keep scan_loop from moving stores to this MEM
9911 out of the loop just because this REG is neither a
9912 user-variable nor used in the loop test. */
9913 reg = gen_reg_rtx (GET_MODE (mem));
9914 REG_USERVAR_P (reg) = 1;
9915 loop_info->mems[i].reg = reg;
9917 /* Now, replace all references to the MEM with the
9918 corresponding pseudos. */
9919 maybe_never = 0;
9920 for (p = next_insn_in_loop (loop, loop->scan_start);
9921 p != NULL_RTX;
9922 p = next_insn_in_loop (loop, p))
9924 if (INSN_P (p))
9926 rtx set;
9928 set = single_set (p);
9930 /* See if this copies the mem into a register that isn't
9931 modified afterwards. We'll try to do copy propagation
9932 a little further on. */
9933 if (set
9934 /* @@@ This test is _way_ too conservative. */
9935 && ! maybe_never
9936 && GET_CODE (SET_DEST (set)) == REG
9937 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9938 && REGNO (SET_DEST (set)) < last_max_reg
9939 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9940 && rtx_equal_p (SET_SRC (set), mem))
9941 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9943 /* See if this copies the mem from a register that isn't
9944 modified afterwards. We'll try to remove the
9945 redundant copy later on by doing a little register
9946 renaming and copy propagation. This will help
9947 to untangle things for the BIV detection code. */
9948 if (set
9949 && ! maybe_never
9950 && GET_CODE (SET_SRC (set)) == REG
9951 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9952 && REGNO (SET_SRC (set)) < last_max_reg
9953 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9954 && rtx_equal_p (SET_DEST (set), mem))
9955 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
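/* In other words, an insn of the form (set (reg P) <this MEM>) records
   P in LOAD_COPIES and (set <this MEM> (reg P)) records P in
   STORE_COPIES, provided P is a pseudo set exactly once in the loop
   and the insn is known to always execute; those pseudos are the
   copy-propagation candidates handled after this replacement loop.  */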
9957 /* If this is a call which uses / clobbers this memory
9958 location, we must not change the interface here. */
9959 if (GET_CODE (p) == CALL_INSN
9960 && reg_mentioned_p (loop_info->mems[i].mem,
9961 CALL_INSN_FUNCTION_USAGE (p)))
9963 cancel_changes (0);
9964 loop_info->mems[i].optimize = 0;
9965 break;
9967 else
9968 /* Replace the memory reference with the shadow register. */
9969 replace_loop_mems (p, loop_info->mems[i].mem,
9970 loop_info->mems[i].reg);
9973 if (GET_CODE (p) == CODE_LABEL
9974 || GET_CODE (p) == JUMP_INSN)
9975 maybe_never = 1;
9978 if (! loop_info->mems[i].optimize)
9979 ; /* We found we couldn't do the replacement, so do nothing. */
9980 else if (! apply_change_group ())
9981 /* We couldn't replace all occurrences of the MEM. */
9982 loop_info->mems[i].optimize = 0;
9983 else
9985 /* Load the memory immediately before LOOP->START, which is
9986 the NOTE_LOOP_BEG. */
9987 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9988 rtx set;
9989 rtx best = mem;
9990 int j;
9991 struct elt_loc_list *const_equiv = 0;
9993 if (e)
9995 struct elt_loc_list *equiv;
9996 struct elt_loc_list *best_equiv = 0;
9997 for (equiv = e->locs; equiv; equiv = equiv->next)
9999 if (CONSTANT_P (equiv->loc))
10000 const_equiv = equiv;
10001 else if (GET_CODE (equiv->loc) == REG
10002 /* Extending hard register lifetimes causes a crash
10003 on SRC targets. Doing so on non-SRC targets is
10004 probably also not a good idea, since we most
10005 probably have a pseudo-register equivalence as
10006 well. */
10007 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10008 best_equiv = equiv;
10010 /* Use the constant equivalence if that is cheap enough. */
10011 if (! best_equiv)
10012 best_equiv = const_equiv;
10013 else if (const_equiv
10014 && (rtx_cost (const_equiv->loc, SET)
10015 <= rtx_cost (best_equiv->loc, SET)))
10017 best_equiv = const_equiv;
10018 const_equiv = 0;
10021 /* If best_equiv is nonzero, we know that MEM is set to a
10022 constant or register before the loop. We will use this
10023 knowledge to initialize the shadow register with that
10024 constant or reg rather than by loading from MEM. */
10025 if (best_equiv)
10026 best = copy_rtx (best_equiv->loc);
10029 set = gen_move_insn (reg, best);
10030 set = loop_insn_hoist (loop, set);
10031 if (REG_P (best))
10033 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10034 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10036 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10037 break;
10041 if (const_equiv)
10042 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10044 if (written)
10046 if (label == NULL_RTX)
10048 label = gen_label_rtx ();
10049 emit_label_after (label, loop->end);
10052 /* Store the memory immediately after END, which is
10053 the NOTE_LOOP_END. */
10054 set = gen_move_insn (copy_rtx (mem), reg);
10055 loop_insn_emit_after (loop, 0, label, set);
10058 if (loop_dump_stream)
10060 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10061 REGNO (reg), (written ? "r/w" : "r/o"));
10062 print_rtl (loop_dump_stream, mem);
10063 fputc ('\n', loop_dump_stream);
10066 /* Attempt a bit of copy propagation. This helps untangle the
10067 data flow, and enables {basic,general}_induction_var to find
10068 more bivs/givs. */
10069 EXECUTE_IF_SET_IN_REG_SET
10070 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10072 try_copy_prop (loop, reg, j);
10074 CLEAR_REG_SET (&load_copies);
10076 EXECUTE_IF_SET_IN_REG_SET
10077 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10079 try_swap_copy_prop (loop, reg, j);
10081 CLEAR_REG_SET (&store_copies);
10085 if (label != NULL_RTX && end_label != NULL_RTX)
10087 /* Now, we need to replace all references to the previous exit
10088 label with the new one. */
10089 rtx_pair rr;
10090 rr.r1 = end_label;
10091 rr.r2 = label;
10093 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10095 for_each_rtx (&p, replace_label, &rr);
10097 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10098 field. This is not handled by for_each_rtx because it doesn't
10099 handle unprinted ('0') fields. We need to update JUMP_LABEL
10100 because the immediately following unroll pass will use it.
10101 replace_label would not work anyway, because it only handles
10102 LABEL_REFs. */
10103 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10104 JUMP_LABEL (p) = label;
10108 cselib_finish ();
10111 /* For communication between note_reg_stored and its caller. */
10112 struct note_reg_stored_arg
10114 int set_seen;
10115 rtx reg;
10118 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10119 is equal to ARG. */
10120 static void
10121 note_reg_stored (x, setter, arg)
10122 rtx x, setter ATTRIBUTE_UNUSED;
10123 void *arg;
10125 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10126 if (t->reg == x)
10127 t->set_seen = 1;
10130 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10131 There must be exactly one insn that sets this pseudo; it will be
10132 deleted if all replacements succeed and we can prove that the register
10133 is not used after the loop. */
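/* For instance, once load_mems has replaced a hoisted MEM by its
   shadow pseudo, an insn that used to copy the MEM into some pseudo
   now reads, say, (set (reg 70) (reg 65)).  Calling this function
   with REPLACEMENT == (reg 65) and REGNO == 70 substitutes (reg 65)
   for later uses of (reg 70) within the same extended basic block and
   deletes the now-redundant copy once every use has been reached.
   The register numbers are hypothetical.  */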
10135 static void
10136 try_copy_prop (loop, replacement, regno)
10137 const struct loop *loop;
10138 rtx replacement;
10139 unsigned int regno;
10141 /* This is the reg that we are copying from. */
10142 rtx reg_rtx = regno_reg_rtx[regno];
10143 rtx init_insn = 0;
10144 rtx insn;
10145 /* These help keep track of whether we replaced all uses of the reg. */
10146 int replaced_last = 0;
10147 int store_is_first = 0;
10149 for (insn = next_insn_in_loop (loop, loop->scan_start);
10150 insn != NULL_RTX;
10151 insn = next_insn_in_loop (loop, insn))
10153 rtx set;
10155 /* Only substitute within one extended basic block from the initializing
10156 insn. */
10157 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10158 break;
10160 if (! INSN_P (insn))
10161 continue;
10163 /* Is this the initializing insn? */
10164 set = single_set (insn);
10165 if (set
10166 && GET_CODE (SET_DEST (set)) == REG
10167 && REGNO (SET_DEST (set)) == regno)
10169 if (init_insn)
10170 abort ();
10172 init_insn = insn;
10173 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10174 store_is_first = 1;
10177 /* Only substitute after seeing the initializing insn. */
10178 if (init_insn && insn != init_insn)
10180 struct note_reg_stored_arg arg;
10182 replace_loop_regs (insn, reg_rtx, replacement);
10183 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10184 replaced_last = 1;
10186 /* Stop replacing when REPLACEMENT is modified. */
10187 arg.reg = replacement;
10188 arg.set_seen = 0;
10189 note_stores (PATTERN (insn), note_reg_stored, &arg);
10190 if (arg.set_seen)
10192 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10194 /* It is possible that we've turned a previously valid REG_EQUAL note
10195 invalid: we changed REGNO to REPLACEMENT and, unlike REGNO,
10196 REPLACEMENT is modified here, so the note would take on a different meaning. */
10197 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10198 remove_note (insn, note);
10199 break;
10203 if (! init_insn)
10204 abort ();
10205 if (apply_change_group ())
10207 if (loop_dump_stream)
10208 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10209 if (store_is_first && replaced_last)
10211 rtx first;
10212 rtx retval_note;
10214 /* Assume we're just deleting INIT_INSN. */
10215 first = init_insn;
10216 /* Look for REG_RETVAL note. If we're deleting the end of
10217 the libcall sequence, the whole sequence can go. */
10218 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10219 /* If we found a REG_RETVAL note, find the first instruction
10220 in the sequence. */
10221 if (retval_note)
10222 first = XEXP (retval_note, 0);
10224 /* Delete the instructions. */
10225 loop_delete_insns (first, init_insn);
10227 if (loop_dump_stream)
10228 fprintf (loop_dump_stream, ".\n");
10232 /* Replace all the instructions from FIRST up to and including LAST
10233 with NOTE_INSN_DELETED notes. */
10235 static void
10236 loop_delete_insns (first, last)
10237 rtx first;
10238 rtx last;
10240 while (1)
10242 if (loop_dump_stream)
10243 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10244 INSN_UID (first));
10245 delete_insn (first);
10247 /* If this was the LAST instruction we're supposed to delete,
10248 we're done. */
10249 if (first == last)
10250 break;
10252 first = NEXT_INSN (first);
10256 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10257 loop LOOP if the order of the sets of these registers can be
10258 swapped. There must be exactly one insn within the loop that sets
10259 this pseudo followed immediately by a move insn that sets
10260 REPLACEMENT with REGNO. */
10261 static void
10262 try_swap_copy_prop (loop, replacement, regno)
10263 const struct loop *loop;
10264 rtx replacement;
10265 unsigned int regno;
10267 rtx insn;
10268 rtx set = NULL_RTX;
10269 unsigned int new_regno;
10271 new_regno = REGNO (replacement);
10273 for (insn = next_insn_in_loop (loop, loop->scan_start);
10274 insn != NULL_RTX;
10275 insn = next_insn_in_loop (loop, insn))
10277 /* Search for the insn that copies REGNO to NEW_REGNO. */
10278 if (INSN_P (insn)
10279 && (set = single_set (insn))
10280 && GET_CODE (SET_DEST (set)) == REG
10281 && REGNO (SET_DEST (set)) == new_regno
10282 && GET_CODE (SET_SRC (set)) == REG
10283 && REGNO (SET_SRC (set)) == regno)
10284 break;
10287 if (insn != NULL_RTX)
10289 rtx prev_insn;
10290 rtx prev_set;
10292 /* Some DEF-USE info would come in handy here to make this
10293 function more general. For now, just check the previous insn
10294 which is the most likely candidate for setting REGNO. */
10296 prev_insn = PREV_INSN (insn);
10298 if (INSN_P (insn)
10299 && (prev_set = single_set (prev_insn))
10300 && GET_CODE (SET_DEST (prev_set)) == REG
10301 && REGNO (SET_DEST (prev_set)) == regno)
10303 /* We have:
10304 (set (reg regno) (expr))
10305 (set (reg new_regno) (reg regno))
10307 so try converting this to:
10308 (set (reg new_regno) (expr))
10309 (set (reg regno) (reg new_regno))
10311 The former construct is often generated when a global
10312 variable used for an induction variable is shadowed by a
10313 register (NEW_REGNO). The latter construct improves the
10314 chances of GIV replacement and BIV elimination. */
10316 validate_change (prev_insn, &SET_DEST (prev_set),
10317 replacement, 1);
10318 validate_change (insn, &SET_DEST (set),
10319 SET_SRC (set), 1);
10320 validate_change (insn, &SET_SRC (set),
10321 replacement, 1);
10323 if (apply_change_group ())
10325 if (loop_dump_stream)
10326 fprintf (loop_dump_stream,
10327 " Swapped set of reg %d at %d with reg %d at %d.\n",
10328 regno, INSN_UID (insn),
10329 new_regno, INSN_UID (prev_insn));
10331 /* Update first use of REGNO. */
10332 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10333 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10335 /* Now perform copy propagation to hopefully
10336 remove all uses of REGNO within the loop. */
10337 try_copy_prop (loop, replacement, regno);
10343 /* Replace MEM with its associated pseudo register. This function is
10344 called from load_mems via for_each_rtx. DATA is actually a pointer
10345 to a structure describing the instruction currently being scanned
10346 and the MEM we are currently replacing. */
10348 static int
10349 replace_loop_mem (mem, data)
10350 rtx *mem;
10351 void *data;
10353 loop_replace_args *args = (loop_replace_args *) data;
10354 rtx m = *mem;
10356 if (m == NULL_RTX)
10357 return 0;
10359 switch (GET_CODE (m))
10361 case MEM:
10362 break;
10364 case CONST_DOUBLE:
10365 /* We're not interested in the MEM associated with a
10366 CONST_DOUBLE, so there's no need to traverse into one. */
10367 return -1;
10369 default:
10370 /* This is not a MEM. */
10371 return 0;
10374 if (!rtx_equal_p (args->match, m))
10375 /* This is not the MEM we are currently replacing. */
10376 return 0;
10378 /* Actually replace the MEM. */
10379 validate_change (args->insn, mem, args->replacement, 1);
10381 return 0;
10384 static void
10385 replace_loop_mems (insn, mem, reg)
10386 rtx insn;
10387 rtx mem;
10388 rtx reg;
10390 loop_replace_args args;
10392 args.insn = insn;
10393 args.match = mem;
10394 args.replacement = reg;
10396 for_each_rtx (&insn, replace_loop_mem, &args);
10399 /* Replace one register with another. Called through for_each_rtx; PX points
10400 to the rtx being scanned. DATA is actually a pointer to
10401 a structure of arguments. */
10403 static int
10404 replace_loop_reg (px, data)
10405 rtx *px;
10406 void *data;
10408 rtx x = *px;
10409 loop_replace_args *args = (loop_replace_args *) data;
10411 if (x == NULL_RTX)
10412 return 0;
10414 if (x == args->match)
10415 validate_change (args->insn, px, args->replacement, 1);
10417 return 0;
10420 static void
10421 replace_loop_regs (insn, reg, replacement)
10422 rtx insn;
10423 rtx reg;
10424 rtx replacement;
10426 loop_replace_args args;
10428 args.insn = insn;
10429 args.match = reg;
10430 args.replacement = replacement;
10432 for_each_rtx (&insn, replace_loop_reg, &args);
10435 /* Replace occurrences of the old exit label for the loop with the new
10436 one. DATA is an rtx_pair containing the old and new labels,
10437 respectively. */
10439 static int
10440 replace_label (x, data)
10441 rtx *x;
10442 void *data;
10444 rtx l = *x;
10445 rtx old_label = ((rtx_pair *) data)->r1;
10446 rtx new_label = ((rtx_pair *) data)->r2;
10448 if (l == NULL_RTX)
10449 return 0;
10451 if (GET_CODE (l) != LABEL_REF)
10452 return 0;
10454 if (XEXP (l, 0) != old_label)
10455 return 0;
10457 XEXP (l, 0) = new_label;
10458 ++LABEL_NUSES (new_label);
10459 --LABEL_NUSES (old_label);
10461 return 0;
10464 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10465 (ignored in the interim). */
10467 static rtx
10468 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10469 const struct loop *loop ATTRIBUTE_UNUSED;
10470 basic_block where_bb ATTRIBUTE_UNUSED;
10471 rtx where_insn;
10472 rtx pattern;
10474 return emit_insn_after (pattern, where_insn);
10478 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10479 in basic block WHERE_BB (ignored in the interim) within the loop
10480 otherwise hoist PATTERN into the loop pre-header. */
10482 rtx
10483 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10484 const struct loop *loop;
10485 basic_block where_bb ATTRIBUTE_UNUSED;
10486 rtx where_insn;
10487 rtx pattern;
10489 if (! where_insn)
10490 return loop_insn_hoist (loop, pattern);
10491 return emit_insn_before (pattern, where_insn);
10495 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10496 WHERE_BB (ignored in the interim) within the loop. */
10498 static rtx
10499 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10500 const struct loop *loop ATTRIBUTE_UNUSED;
10501 basic_block where_bb ATTRIBUTE_UNUSED;
10502 rtx where_insn;
10503 rtx pattern;
10505 return emit_call_insn_before (pattern, where_insn);
10509 /* Hoist insn for PATTERN into the loop pre-header. */
10511 rtx
10512 loop_insn_hoist (loop, pattern)
10513 const struct loop *loop;
10514 rtx pattern;
10516 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10520 /* Hoist call insn for PATTERN into the loop pre-header. */
10522 static rtx
10523 loop_call_insn_hoist (loop, pattern)
10524 const struct loop *loop;
10525 rtx pattern;
10527 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10531 /* Sink insn for PATTERN after the loop end. */
10533 rtx
10534 loop_insn_sink (loop, pattern)
10535 const struct loop *loop;
10536 rtx pattern;
10538 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10541 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10542 and a constant. Emit a sequence of instructions to load it into REG. */
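/* So, for a hypothetical FINAL_VALUE of (plus (reg 58) (const_int 16)),
   the returned sequence contains whatever insns force_operand needs to
   compute the sum and leave it in REG, while for FINAL_VALUE == (reg 58)
   it is just a single move into REG.  */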
10543 static rtx
10544 gen_load_of_final_value (reg, final_value)
10545 rtx reg, final_value;
10547 rtx seq;
10548 start_sequence ();
10549 final_value = force_operand (final_value, reg);
10550 if (final_value != reg)
10551 emit_move_insn (reg, final_value);
10552 seq = get_insns ();
10553 end_sequence ();
10554 return seq;
10557 /* If the loop has multiple exits, emit insn for PATTERN before the
10558 loop to ensure that it will always be executed no matter how the
10559 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10560 since this is slightly more efficient. */
10562 static rtx
10563 loop_insn_sink_or_swim (loop, pattern)
10564 const struct loop *loop;
10565 rtx pattern;
10567 if (loop->exit_count)
10568 return loop_insn_hoist (loop, pattern);
10569 else
10570 return loop_insn_sink (loop, pattern);
10573 static void
10574 loop_ivs_dump (loop, file, verbose)
10575 const struct loop *loop;
10576 FILE *file;
10577 int verbose;
10579 struct iv_class *bl;
10580 int iv_num = 0;
10582 if (! loop || ! file)
10583 return;
10585 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10586 iv_num++;
10588 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10590 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10592 loop_iv_class_dump (bl, file, verbose);
10593 fputc ('\n', file);
10598 static void
10599 loop_iv_class_dump (bl, file, verbose)
10600 const struct iv_class *bl;
10601 FILE *file;
10602 int verbose ATTRIBUTE_UNUSED;
10604 struct induction *v;
10605 rtx incr;
10606 int i;
10608 if (! bl || ! file)
10609 return;
10611 fprintf (file, "IV class for reg %d, benefit %d\n",
10612 bl->regno, bl->total_benefit);
10614 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10615 if (bl->initial_value)
10617 fprintf (file, ", init val: ");
10618 print_simple_rtl (file, bl->initial_value);
10620 if (bl->initial_test)
10622 fprintf (file, ", init test: ");
10623 print_simple_rtl (file, bl->initial_test);
10625 fputc ('\n', file);
10627 if (bl->final_value)
10629 fprintf (file, " Final val: ");
10630 print_simple_rtl (file, bl->final_value);
10631 fputc ('\n', file);
10634 if ((incr = biv_total_increment (bl)))
10636 fprintf (file, " Total increment: ");
10637 print_simple_rtl (file, incr);
10638 fputc ('\n', file);
10641 /* List the increments. */
10642 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10644 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10645 print_simple_rtl (file, v->add_val);
10646 fputc ('\n', file);
10649 /* List the givs. */
10650 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10652 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10653 i, INSN_UID (v->insn), v->benefit);
10654 if (v->giv_type == DEST_ADDR)
10655 print_simple_rtl (file, v->mem);
10656 else
10657 print_simple_rtl (file, single_set (v->insn));
10658 fputc ('\n', file);
10663 static void
10664 loop_biv_dump (v, file, verbose)
10665 const struct induction *v;
10666 FILE *file;
10667 int verbose;
10669 if (! v || ! file)
10670 return;
10672 fprintf (file,
10673 "Biv %d: insn %d",
10674 REGNO (v->dest_reg), INSN_UID (v->insn));
10675 fprintf (file, " const ");
10676 print_simple_rtl (file, v->add_val);
10678 if (verbose && v->final_value)
10680 fputc ('\n', file);
10681 fprintf (file, " final ");
10682 print_simple_rtl (file, v->final_value);
10685 fputc ('\n', file);
10689 static void
10690 loop_giv_dump (v, file, verbose)
10691 const struct induction *v;
10692 FILE *file;
10693 int verbose;
10695 if (! v || ! file)
10696 return;
10698 if (v->giv_type == DEST_REG)
10699 fprintf (file, "Giv %d: insn %d",
10700 REGNO (v->dest_reg), INSN_UID (v->insn));
10701 else
10702 fprintf (file, "Dest address: insn %d",
10703 INSN_UID (v->insn));
10705 fprintf (file, " src reg %d benefit %d",
10706 REGNO (v->src_reg), v->benefit);
10707 fprintf (file, " lifetime %d",
10708 v->lifetime);
10710 if (v->replaceable)
10711 fprintf (file, " replaceable");
10713 if (v->no_const_addval)
10714 fprintf (file, " ncav");
10716 if (v->ext_dependent)
10718 switch (GET_CODE (v->ext_dependent))
10720 case SIGN_EXTEND:
10721 fprintf (file, " ext se");
10722 break;
10723 case ZERO_EXTEND:
10724 fprintf (file, " ext ze");
10725 break;
10726 case TRUNCATE:
10727 fprintf (file, " ext tr");
10728 break;
10729 default:
10730 abort ();
10734 fputc ('\n', file);
10735 fprintf (file, " mult ");
10736 print_simple_rtl (file, v->mult_val);
10738 fputc ('\n', file);
10739 fprintf (file, " add ");
10740 print_simple_rtl (file, v->add_val);
10742 if (verbose && v->final_value)
10744 fputc ('\n', file);
10745 fprintf (file, " final ");
10746 print_simple_rtl (file, v->final_value);
10749 fputc ('\n', file);
10753 void
10754 debug_ivs (loop)
10755 const struct loop *loop;
10757 loop_ivs_dump (loop, stderr, 1);
10761 void
10762 debug_iv_class (bl)
10763 const struct iv_class *bl;
10765 loop_iv_class_dump (bl, stderr, 1);
10769 void
10770 debug_biv (v)
10771 const struct induction *v;
10773 loop_biv_dump (v, stderr, 1);
10777 void
10778 debug_giv (v)
10779 const struct induction *v;
10781 loop_giv_dump (v, stderr, 1);
10785 #define LOOP_BLOCK_NUM_1(INSN) \
10786 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10788 /* The notes do not have an assigned block, so look at the next insn. */
10789 #define LOOP_BLOCK_NUM(INSN) \
10790 ((INSN) ? (GET_CODE (INSN) == NOTE \
10791 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10792 : LOOP_BLOCK_NUM_1 (INSN)) \
10793 : -1)
10795 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10797 static void
10798 loop_dump_aux (loop, file, verbose)
10799 const struct loop *loop;
10800 FILE *file;
10801 int verbose ATTRIBUTE_UNUSED;
10803 rtx label;
10805 if (! loop || ! file)
10806 return;
10808 /* Print diagnostics to compare our concept of a loop with
10809 what the loop notes say. */
10810 if (! PREV_INSN (loop->first->head)
10811 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10812 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10813 != NOTE_INSN_LOOP_BEG)
10814 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10815 INSN_UID (PREV_INSN (loop->first->head)));
10816 if (! NEXT_INSN (loop->last->end)
10817 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10818 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10819 != NOTE_INSN_LOOP_END)
10820 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10821 INSN_UID (NEXT_INSN (loop->last->end)));
10823 if (loop->start)
10825 fprintf (file,
10826 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10827 LOOP_BLOCK_NUM (loop->start),
10828 LOOP_INSN_UID (loop->start),
10829 LOOP_BLOCK_NUM (loop->cont),
10830 LOOP_INSN_UID (loop->cont),
10831 LOOP_BLOCK_NUM (loop->cont),
10832 LOOP_INSN_UID (loop->cont),
10833 LOOP_BLOCK_NUM (loop->vtop),
10834 LOOP_INSN_UID (loop->vtop),
10835 LOOP_BLOCK_NUM (loop->end),
10836 LOOP_INSN_UID (loop->end));
10837 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10838 LOOP_BLOCK_NUM (loop->top),
10839 LOOP_INSN_UID (loop->top),
10840 LOOP_BLOCK_NUM (loop->scan_start),
10841 LOOP_INSN_UID (loop->scan_start));
10842 fprintf (file, ";; exit_count %d", loop->exit_count);
10843 if (loop->exit_count)
10845 fputs (", labels:", file);
10846 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10848 fprintf (file, " %d ",
10849 LOOP_INSN_UID (XEXP (label, 0)));
10852 fputs ("\n", file);
10854 /* This can happen when a marked loop appears as two nested loops,
10855 say from while (a || b) {}. The inner loop won't match
10856 the loop markers but the outer one will. */
10857 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10858 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10862 /* Call this function from the debugger to dump LOOP. */
10864 void
10865 debug_loop (loop)
10866 const struct loop *loop;
10868 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10871 /* Call this function from the debugger to dump LOOPS. */
10873 void
10874 debug_loops (loops)
10875 const struct loops *loops;
10877 flow_loops_dump (loops, stderr, loop_dump_aux, 1);