2002-06-05 David S. Miller <davem@redhat.com>
[official-gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
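/* Added illustrative sketch (commentary added here, not part of the original
   description): at the source level the transformations above look roughly
   like the following; every variable name is made up for the example.

   Moving a loop-invariant computation:

       before:  for (i = 0; i < n; i++)  a[i] = x * y + i;
       after:   t = x * y;
                for (i = 0; i < n; i++)  a[i] = t + i;

   Strength-reducing a general induction variable (the address a + i*4
   becomes a pointer bumped each iteration):

       before:  for (i = 0; i < n; i++)  sum += a[i];
       after:   for (p = a; p < a + n; p++)  sum += *p;

   Zero-extension splitting (clear the register once before the loop, copy
   only the low part inside it):

       before:  for (...)  r = (unsigned char) *s++;
       after:   r = 0;
                for (...)  copy_low_byte (r, *s++);

   where copy_low_byte stands for a STRICT_LOW_PART store and is purely
   illustrative.  */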
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
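/* Added usage sketch: on a target whose machine description provides the
   standard "prefetch" named pattern, HAVE_prefetch, CODE_FOR_prefetch and
   gen_prefetch come from the insn-flags.h/insn-codes.h files generated
   from that description, and a prefetch for some address rtx can be
   emitted roughly as

       if (HAVE_prefetch)
         emit_insn (gen_prefetch (addr, GEN_INT (write_p), GEN_INT (locality)));

   where write_p (0 = read, 1 = write) and locality (0..3) are the constant
   operands of the pattern; addr, write_p and locality are placeholder names
   for this example only.  */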
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimal number of prefetch blocks that a loop must consume to make
83 the emitting of prefetch instructions in the body of the loop worthwhile. */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
86 /* Parameterize some prefetch heuristics so they can be turned on and off
87 easily for performance testing on new architectures. These can be
88 defined in target-dependent files. */
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
95 /* Define what we mean by "dense" loads and stores; this value divided by 256
96 is the minimum fraction of memory references that is worth prefetching (about 86% at the default of 220). */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
127 /* Define a limit to how far apart indices can be and still be merged
128 into a single prefetch. */
129 #ifndef PREFETCH_EXTREME_DIFFERENCE
130 #define PREFETCH_EXTREME_DIFFERENCE 4096
131 #endif
133 /* Issue prefetch instructions before the loop to fetch data to be used
134 in the first few loop iterations. */
135 #ifndef PREFETCH_BEFORE_LOOP
136 #define PREFETCH_BEFORE_LOOP 1
137 #endif
139 /* Do not handle reversed order prefetches (negative stride). */
140 #ifndef PREFETCH_NO_REVERSE_ORDER
141 #define PREFETCH_NO_REVERSE_ORDER 1
142 #endif
144 /* Prefetch even if the GIV is in conditional code. */
145 #ifndef PREFETCH_CONDITIONAL
146 #define PREFETCH_CONDITIONAL 1
147 #endif
149 /* If the loop requires more prefetches than the target can process in
150 parallel then don't prefetch anything in that loop. */
151 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
152 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
153 #endif
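/* Added note: each PREFETCH_* parameter above is wrapped in #ifndef, so a
   port can tune the heuristics from its target-dependent files simply by
   defining the macros there, e.g. (purely hypothetical values, not taken
   from any real port):

       #define PREFETCH_BLOCK 64
       #define PREFETCH_LOW_LOOPCNT 16
       #define PREFETCH_CONDITIONAL 0

   Any macro left undefined keeps the default selected above.  */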
155 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
156 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
158 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
159 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
160 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
162 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
163 ((REGNO) < FIRST_PSEUDO_REGISTER \
164 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
167 /* Vector mapping INSN_UIDs to luids.
168 The luids are like uids but always increase monotonically.
169 We use them to see whether a jump comes from outside a given loop. */
171 int *uid_luid;
173 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
174 number the insn is contained in. */
176 struct loop **uid_loop;
178 /* 1 + largest uid of any insn. */
180 int max_uid_for_loop;
182 /* 1 + luid of last insn. */
184 static int max_luid;
186 /* Number of loops detected in current function. Used as index to the
187 next few tables. */
189 static int max_loop_num;
191 /* Bound on pseudo register number before loop optimization.
192 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
193 unsigned int max_reg_before_loop;
195 /* The value to pass to the next call of reg_scan_update. */
196 static int loop_max_reg;
198 #define obstack_chunk_alloc xmalloc
199 #define obstack_chunk_free free
201 /* During the analysis of a loop, a chain of `struct movable's
202 is made to record all the movable insns found.
203 Then the entire chain can be scanned to decide which to move. */
205 struct movable
206 {
207 rtx insn; /* A movable insn */
208 rtx set_src; /* The expression this reg is set from. */
209 rtx set_dest; /* The destination of this SET. */
210 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
211 of any registers used within the LIBCALL. */
212 int consec; /* Number of consecutive following insns
213 that must be moved with this one. */
214 unsigned int regno; /* The register it sets */
215 short lifetime; /* lifetime of that register;
216 may be adjusted when matching movables
217 that load the same value are found. */
218 short savings; /* Number of insns we can move for this reg,
219 including other movables that force this
220 or match this one. */
221 unsigned int cond : 1; /* 1 if only conditionally movable */
222 unsigned int force : 1; /* 1 means MUST move this insn */
223 unsigned int global : 1; /* 1 means reg is live outside this loop */
224 /* If PARTIAL is 1, GLOBAL means something different:
225 that the reg is live outside the range from where it is set
226 to the following label. */
227 unsigned int done : 1; /* 1 inhibits further processing of this */
229 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
230 In particular, moving it does not make it
231 invariant. */
232 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
233 load SRC, rather than copying INSN. */
234 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
235 first insn of a consecutive sets group. */
236 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
237 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
238 that we should avoid changing when clearing
239 the rest of the reg. */
240 struct movable *match; /* First entry for same value */
241 struct movable *forces; /* An insn that must be moved if this is */
242 struct movable *next;
243 };
246 FILE *loop_dump_stream;
248 /* Forward declarations. */
250 static void invalidate_loops_containing_label PARAMS ((rtx));
251 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
252 static void mark_loop_jump PARAMS ((rtx, struct loop *));
253 static void prescan_loop PARAMS ((struct loop *));
254 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
255 static int consec_sets_invariant_p PARAMS ((const struct loop *,
256 rtx, int, rtx));
257 static int labels_in_range_p PARAMS ((rtx, int));
258 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
259 static void note_addr_stored PARAMS ((rtx, rtx, void *));
260 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
261 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
262 static void scan_loop PARAMS ((struct loop*, int));
263 #if 0
264 static void replace_call_address PARAMS ((rtx, rtx, rtx));
265 #endif
266 static rtx skip_consec_insns PARAMS ((rtx, int));
267 static int libcall_benefit PARAMS ((rtx));
268 static void ignore_some_movables PARAMS ((struct loop_movables *));
269 static void force_movables PARAMS ((struct loop_movables *));
270 static void combine_movables PARAMS ((struct loop_movables *,
271 struct loop_regs *));
272 static int num_unmoved_movables PARAMS ((const struct loop *));
273 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
274 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
275 struct loop_regs *));
276 static void add_label_notes PARAMS ((rtx, rtx));
277 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
278 int, int));
279 static void loop_movables_add PARAMS((struct loop_movables *,
280 struct movable *));
281 static void loop_movables_free PARAMS((struct loop_movables *));
282 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
283 static void loop_bivs_find PARAMS((struct loop *));
284 static void loop_bivs_init_find PARAMS((struct loop *));
285 static void loop_bivs_check PARAMS((struct loop *));
286 static void loop_givs_find PARAMS((struct loop *));
287 static void loop_givs_check PARAMS((struct loop *));
288 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
289 int, int));
290 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
291 struct induction *, rtx));
292 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
293 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
294 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
295 rtx *));
296 static void loop_ivs_free PARAMS((struct loop *));
297 static void strength_reduce PARAMS ((struct loop *, int));
298 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
299 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
300 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
301 static void record_biv PARAMS ((struct loop *, struct induction *,
302 rtx, rtx, rtx, rtx, rtx *,
303 int, int));
304 static void check_final_value PARAMS ((const struct loop *,
305 struct induction *));
306 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
307 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
308 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
309 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
310 static void record_giv PARAMS ((const struct loop *, struct induction *,
311 rtx, rtx, rtx, rtx, rtx, rtx, int,
312 enum g_types, int, int, rtx *));
313 static void update_giv_derive PARAMS ((const struct loop *, rtx));
314 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
315 struct loop_info *));
316 static int basic_induction_var PARAMS ((const struct loop *, rtx,
317 enum machine_mode, rtx, rtx,
318 rtx *, rtx *, rtx **));
319 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
320 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
321 rtx *, rtx *, rtx *, int, int *,
322 enum machine_mode));
323 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
324 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
325 static int check_dbra_loop PARAMS ((struct loop *, int));
326 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
327 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
328 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
329 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
330 static int product_cheap_p PARAMS ((rtx, rtx));
331 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
332 int, int, int));
333 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
334 struct iv_class *, int,
335 basic_block, rtx));
336 static int last_use_this_basic_block PARAMS ((rtx, rtx));
337 static void record_initial PARAMS ((rtx, rtx, void *));
338 static void update_reg_last_use PARAMS ((rtx, rtx));
339 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
340 static void loop_regs_scan PARAMS ((const struct loop *, int));
341 static int count_insns_in_loop PARAMS ((const struct loop *));
342 static void load_mems PARAMS ((const struct loop *));
343 static int insert_loop_mem PARAMS ((rtx *, void *));
344 static int replace_loop_mem PARAMS ((rtx *, void *));
345 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
346 static int replace_loop_reg PARAMS ((rtx *, void *));
347 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
348 static void note_reg_stored PARAMS ((rtx, rtx, void *));
349 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
350 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
351 unsigned int));
352 static int replace_label PARAMS ((rtx *, void *));
353 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
354 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
355 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
356 static void loop_regs_update PARAMS ((const struct loop *, rtx));
357 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
359 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
360 rtx, rtx));
361 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
362 basic_block, rtx, rtx));
363 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
364 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
366 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
367 static void loop_delete_insns PARAMS ((rtx, rtx));
368 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
369 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
370 void debug_ivs PARAMS ((const struct loop *));
371 void debug_iv_class PARAMS ((const struct iv_class *));
372 void debug_biv PARAMS ((const struct induction *));
373 void debug_giv PARAMS ((const struct induction *));
374 void debug_loop PARAMS ((const struct loop *));
375 void debug_loops PARAMS ((const struct loops *));
377 typedef struct rtx_pair
378 {
379 rtx r1;
380 rtx r2;
381 } rtx_pair;
383 typedef struct loop_replace_args
384 {
385 rtx match;
386 rtx replacement;
387 rtx insn;
388 } loop_replace_args;
390 /* Nonzero iff INSN is between START and END, inclusive. */
391 #define INSN_IN_RANGE_P(INSN, START, END) \
392 (INSN_UID (INSN) < max_uid_for_loop \
393 && INSN_LUID (INSN) >= INSN_LUID (START) \
394 && INSN_LUID (INSN) <= INSN_LUID (END))
396 /* Indirect_jump_in_function is computed once per function. */
397 static int indirect_jump_in_function;
398 static int indirect_jump_in_function_p PARAMS ((rtx));
400 static int compute_luids PARAMS ((rtx, rtx, int));
402 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
403 struct induction *,
404 rtx));
406 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
407 copy the value of the strength reduced giv to its original register. */
408 static int copy_cost;
410 /* Cost of using a register, to normalize the benefits of a giv. */
411 static int reg_address_cost;
413 void
414 init_loop ()
416 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
418 reg_address_cost = address_cost (reg, SImode);
420 copy_cost = COSTS_N_INSNS (1);
423 /* Compute the mapping from uids to luids.
424 LUIDs are numbers assigned to insns, like uids,
425 except that luids increase monotonically through the code.
426 Start at insn START and stop just before END. Assign LUIDs
427 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
428 static int
429 compute_luids (start, end, prev_luid)
430 rtx start, end;
431 int prev_luid;
433 int i;
434 rtx insn;
436 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
438 if (INSN_UID (insn) >= max_uid_for_loop)
439 continue;
440 /* Don't assign luids to line-number NOTEs, so that the distance in
441 luids between two insns is not affected by -g. */
442 if (GET_CODE (insn) != NOTE
443 || NOTE_LINE_NUMBER (insn) <= 0)
444 uid_luid[INSN_UID (insn)] = ++i;
445 else
446 /* Give a line number note the same luid as preceding insn. */
447 uid_luid[INSN_UID (insn)] = i;
449 return i + 1;
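/* Added worked example: starting from PREV_LUID == 0, an insn stream
   consisting of INSN_A, a line-number NOTE, and INSN_B receives luids
   1, 1, 2 -- the note shares the luid of the insn before it, so luid
   distances do not change under -g -- and the function returns 3.  */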
452 /* Entry point of this file. Perform loop optimization
453 on the current function. F is the first insn of the function
454 and DUMPFILE is a stream for output of a trace of actions taken
455 (or 0 if none should be output). */
457 void
458 loop_optimize (f, dumpfile, flags)
459 /* f is the first instruction of a chain of insns for one function */
460 rtx f;
461 FILE *dumpfile;
462 int flags;
464 rtx insn;
465 int i;
466 struct loops loops_data;
467 struct loops *loops = &loops_data;
468 struct loop_info *loops_info;
470 loop_dump_stream = dumpfile;
472 init_recog_no_volatile ();
474 max_reg_before_loop = max_reg_num ();
475 loop_max_reg = max_reg_before_loop;
477 regs_may_share = 0;
479 /* Count the number of loops. */
481 max_loop_num = 0;
482 for (insn = f; insn; insn = NEXT_INSN (insn))
484 if (GET_CODE (insn) == NOTE
485 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
486 max_loop_num++;
489 /* Don't waste time if no loops. */
490 if (max_loop_num == 0)
491 return;
493 loops->num = max_loop_num;
495 /* Get size to use for tables indexed by uids.
496 Leave some space for labels allocated by find_and_verify_loops. */
497 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
499 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
500 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
501 sizeof (struct loop *));
503 /* Allocate storage for array of loops. */
504 loops->array = (struct loop *)
505 xcalloc (loops->num, sizeof (struct loop));
507 /* Find and process each loop.
508 First, find them, and record them in order of their beginnings. */
509 find_and_verify_loops (f, loops);
511 /* Allocate and initialize auxiliary loop information. */
512 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
513 for (i = 0; i < loops->num; i++)
514 loops->array[i].aux = loops_info + i;
516 /* Now find all register lifetimes. This must be done after
517 find_and_verify_loops, because it might reorder the insns in the
518 function. */
519 reg_scan (f, max_reg_before_loop, 1);
521 /* This must occur after reg_scan so that registers created by gcse
522 will have entries in the register tables.
524 We could have added a call to reg_scan after gcse_main in toplev.c,
525 but moving this call to init_alias_analysis is more efficient. */
526 init_alias_analysis ();
528 /* See if we went too far. Note that get_max_uid already returns
529 one more than the maximum uid of all insns. */
530 if (get_max_uid () > max_uid_for_loop)
531 abort ();
532 /* Now reset it to the actual size we need. See above. */
533 max_uid_for_loop = get_max_uid ();
535 /* find_and_verify_loops has already called compute_luids, but it
536 might have rearranged code afterwards, so we need to recompute
537 the luids now. */
538 max_luid = compute_luids (f, NULL_RTX, 0);
540 /* Don't leave gaps in uid_luid for insns that have been
541 deleted. It is possible that the first or last insn
542 using some register has been deleted by cross-jumping.
543 Make sure that uid_luid for that former insn's uid
544 points to the general area where that insn used to be. */
545 for (i = 0; i < max_uid_for_loop; i++)
547 uid_luid[0] = uid_luid[i];
548 if (uid_luid[0] != 0)
549 break;
551 for (i = 0; i < max_uid_for_loop; i++)
552 if (uid_luid[i] == 0)
553 uid_luid[i] = uid_luid[i - 1];
555 /* Determine if the function has an indirect jump. On some systems
556 this prevents low overhead loop instructions from being used. */
557 indirect_jump_in_function = indirect_jump_in_function_p (f);
559 /* Now scan the loops, last ones first, since this means inner ones are done
560 before outer ones. */
561 for (i = max_loop_num - 1; i >= 0; i--)
563 struct loop *loop = &loops->array[i];
565 if (! loop->invalid && loop->end)
566 scan_loop (loop, flags);
569 end_alias_analysis ();
571 /* Clean up. */
572 free (uid_luid);
573 free (uid_loop);
574 free (loops_info);
575 free (loops->array);
578 /* Returns the next insn, in execution order, after INSN. START and
579 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
580 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
581 insn-stream; it is used with loops that are entered near the
582 bottom. */
584 static rtx
585 next_insn_in_loop (loop, insn)
586 const struct loop *loop;
587 rtx insn;
589 insn = NEXT_INSN (insn);
591 if (insn == loop->end)
593 if (loop->top)
594 /* Go to the top of the loop, and continue there. */
595 insn = loop->top;
596 else
597 /* We're done. */
598 insn = NULL_RTX;
601 if (insn == loop->scan_start)
602 /* We're done. */
603 insn = NULL_RTX;
605 return insn;
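/* Added usage sketch: callers walk the loop body in execution order with
   the idiom used by scan_loop below,

       for (p = next_insn_in_loop (loop, loop->scan_start);
            p != NULL_RTX;
            p = next_insn_in_loop (loop, p))
         ...

   which visits each insn in the body once even when the loop is entered
   near the bottom and LOOP->TOP is set.  */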
608 /* Optimize one loop described by LOOP. */
610 /* ??? Could also move memory writes out of loops if the destination address
611 is invariant, the source is invariant, the memory write is not volatile,
612 and if we can prove that no read inside the loop can read this address
613 before the write occurs. If there is a read of this address after the
614 write, then we can also mark the memory read as invariant. */
616 static void
617 scan_loop (loop, flags)
618 struct loop *loop;
619 int flags;
621 struct loop_info *loop_info = LOOP_INFO (loop);
622 struct loop_regs *regs = LOOP_REGS (loop);
623 int i;
624 rtx loop_start = loop->start;
625 rtx loop_end = loop->end;
626 rtx p;
627 /* 1 if we are scanning insns that could be executed zero times. */
628 int maybe_never = 0;
629 /* 1 if we are scanning insns that might never be executed
630 due to a subroutine call which might exit before they are reached. */
631 int call_passed = 0;
632 /* Jump insn that enters the loop, or 0 if control drops in. */
633 rtx loop_entry_jump = 0;
634 /* Number of insns in the loop. */
635 int insn_count;
636 int tem;
637 rtx temp, update_start, update_end;
638 /* The SET from an insn, if it is the only SET in the insn. */
639 rtx set, set1;
640 /* Chain describing insns movable in current loop. */
641 struct loop_movables *movables = LOOP_MOVABLES (loop);
642 /* Ratio of extra register life span we can justify
643 for saving an instruction. More if loop doesn't call subroutines
644 since in that case saving an insn makes more difference
645 and more registers are available. */
646 int threshold;
647 /* Nonzero if we are scanning instructions in a sub-loop. */
648 int loop_depth = 0;
650 loop->top = 0;
652 movables->head = 0;
653 movables->last = 0;
655 /* Determine whether this loop starts with a jump down to a test at
656 the end. This will occur for a small number of loops with a test
657 that is too complex to duplicate in front of the loop.
659 We search for the first insn or label in the loop, skipping NOTEs.
660 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
661 (because we might have a loop executed only once that contains a
662 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
663 (in case we have a degenerate loop).
665 Note that if we mistakenly think that a loop is entered at the top
666 when, in fact, it is entered at the exit test, the only effect will be
667 slightly poorer optimization. Making the opposite error can generate
668 incorrect code. Since very few loops now start with a jump to the
669 exit test, the code here to detect that case is very conservative. */
671 for (p = NEXT_INSN (loop_start);
672 p != loop_end
673 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
674 && (GET_CODE (p) != NOTE
675 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
676 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
677 p = NEXT_INSN (p))
680 loop->scan_start = p;
682 /* If loop end is the end of the current function, then emit a
683 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
684 note insn. This is the position we use when sinking insns out of
685 the loop. */
686 if (NEXT_INSN (loop->end) != 0)
687 loop->sink = NEXT_INSN (loop->end);
688 else
689 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
691 /* Set up variables describing this loop. */
692 prescan_loop (loop);
693 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
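/* Added illustrative arithmetic for the formula above: with, say, 29
   non-fixed registers (a hypothetical count), a loop without calls gets
   threshold = 2 * (1 + 29) = 60, while the same loop containing a call
   gets threshold = 1 * (1 + 29) = 30.  */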
695 /* If loop has a jump before the first label,
696 the true entry is the target of that jump.
697 Start scan from there.
698 But record in LOOP->TOP the place where the end-test jumps
699 back to so we can scan that after the end of the loop. */
700 if (GET_CODE (p) == JUMP_INSN)
702 loop_entry_jump = p;
704 /* Loop entry must be an unconditional jump (and not a RETURN). */
705 if (any_uncondjump_p (p)
706 && JUMP_LABEL (p) != 0
707 /* Check to see whether the jump actually
708 jumps out of the loop (meaning it's no loop).
709 This case can happen for things like
710 do {..} while (0). If this label was generated previously
711 by loop, we can't tell anything about it and have to reject
712 the loop. */
713 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
715 loop->top = next_label (loop->scan_start);
716 loop->scan_start = JUMP_LABEL (p);
720 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
721 as required by loop_reg_used_before_p. So skip such loops. (This
722 test may never be true, but it's best to play it safe.)
724 Also, skip loops where we do not start scanning at a label. This
725 test also rejects loops starting with a JUMP_INSN that failed the
726 test above. */
728 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
729 || GET_CODE (loop->scan_start) != CODE_LABEL)
731 if (loop_dump_stream)
732 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
733 INSN_UID (loop_start), INSN_UID (loop_end));
734 return;
737 /* Allocate extra space for REGs that might be created by load_mems.
738 We allocate a little extra slop as well, in the hopes that we
739 won't have to reallocate the regs array. */
740 loop_regs_scan (loop, loop_info->mems_idx + 16);
741 insn_count = count_insns_in_loop (loop);
743 if (loop_dump_stream)
745 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
746 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
747 if (loop->cont)
748 fprintf (loop_dump_stream, "Continue at insn %d.\n",
749 INSN_UID (loop->cont));
752 /* Scan through the loop finding insns that are safe to move.
753 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
754 this reg will be considered invariant for subsequent insns.
755 We consider whether subsequent insns use the reg
756 in deciding whether it is worth actually moving.
758 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
759 and therefore it is possible that the insns we are scanning
760 would never be executed. At such times, we must make sure
761 that it is safe to execute the insn once instead of zero times.
762 When MAYBE_NEVER is 0, all insns will be executed at least once
763 so that is not a problem. */
765 for (p = next_insn_in_loop (loop, loop->scan_start);
766 p != NULL_RTX;
767 p = next_insn_in_loop (loop, p))
769 if (GET_CODE (p) == INSN
770 && (set = single_set (p))
771 && GET_CODE (SET_DEST (set)) == REG
772 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
773 && SET_DEST (set) != pic_offset_table_rtx
774 #endif
775 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
777 int tem1 = 0;
778 int tem2 = 0;
779 int move_insn = 0;
780 rtx src = SET_SRC (set);
781 rtx dependencies = 0;
783 /* Figure out what to use as a source of this insn. If a REG_EQUIV
784 note is given or if a REG_EQUAL note with a constant operand is
785 specified, use it as the source and mark that we should move
786 this insn by calling emit_move_insn rather than duplicating the
787 insn.
789 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
790 is present. */
791 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
792 if (temp)
793 src = XEXP (temp, 0), move_insn = 1;
794 else
796 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
797 if (temp && CONSTANT_P (XEXP (temp, 0)))
798 src = XEXP (temp, 0), move_insn = 1;
799 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
801 src = XEXP (temp, 0);
802 /* A libcall block can use regs that don't appear in
803 the equivalent expression. To move the libcall,
804 we must move those regs too. */
805 dependencies = libcall_other_reg (p, src);
809 /* For parallels, add any possible uses to the dependencies, as we can't move
810 the insn without resolving them first. */
811 if (GET_CODE (PATTERN (p)) == PARALLEL)
813 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
815 rtx x = XVECEXP (PATTERN (p), 0, i);
816 if (GET_CODE (x) == USE)
817 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
821 /* Don't try to optimize a register that was made
822 by loop-optimization for an inner loop.
823 We don't know its life-span, so we can't compute the benefit. */
824 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
826 else if (/* The register is used in basic blocks other
827 than the one where it is set (meaning that
828 something after this point in the loop might
829 depend on its value before the set). */
830 ! reg_in_basic_block_p (p, SET_DEST (set))
831 /* And the set is not guaranteed to be executed once
832 the loop starts, or the value before the set is
833 needed before the set occurs...
835 ??? Note we have quadratic behaviour here, mitigated
836 by the fact that the previous test will often fail for
837 large loops. Rather than re-scanning the entire loop
838 each time for register usage, we should build tables
839 of the register usage and use them here instead. */
840 && (maybe_never
841 || loop_reg_used_before_p (loop, set, p)))
842 /* It is unsafe to move the set.
844 This code used to consider it OK to move a set of a variable
845 which was not created by the user and not used in an exit test.
846 That behavior is incorrect and was removed. */
848 else if ((tem = loop_invariant_p (loop, src))
849 && (dependencies == 0
850 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
851 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
852 || (tem1
853 = consec_sets_invariant_p
854 (loop, SET_DEST (set),
855 regs->array[REGNO (SET_DEST (set))].set_in_loop,
856 p)))
857 /* If the insn can cause a trap (such as divide by zero),
858 can't move it unless it's guaranteed to be executed
859 once loop is entered. Even a function call might
860 prevent the trap insn from being reached
861 (since it might exit!) */
862 && ! ((maybe_never || call_passed)
863 && may_trap_p (src)))
865 struct movable *m;
866 int regno = REGNO (SET_DEST (set));
868 /* A potential lossage is where we have a case where two insns
869 can be combined as long as they are both in the loop, but
870 we move one of them outside the loop. For large loops,
871 this can lose. The most common case of this is the address
872 of a function being called.
874 Therefore, if this register is marked as being used exactly
875 once if we are in a loop with calls (a "large loop"), see if
876 we can replace the usage of this register with the source
877 of this SET. If we can, delete this insn.
879 Don't do this if P has a REG_RETVAL note or if we have
880 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
882 if (loop_info->has_call
883 && regs->array[regno].single_usage != 0
884 && regs->array[regno].single_usage != const0_rtx
885 && REGNO_FIRST_UID (regno) == INSN_UID (p)
886 && (REGNO_LAST_UID (regno)
887 == INSN_UID (regs->array[regno].single_usage))
888 && regs->array[regno].set_in_loop == 1
889 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
890 && ! side_effects_p (SET_SRC (set))
891 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
892 && (! SMALL_REGISTER_CLASSES
893 || (! (GET_CODE (SET_SRC (set)) == REG
894 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
895 /* This test is not redundant; SET_SRC (set) might be
896 a call-clobbered register and the life of REGNO
897 might span a call. */
898 && ! modified_between_p (SET_SRC (set), p,
899 regs->array[regno].single_usage)
900 && no_labels_between_p (p, regs->array[regno].single_usage)
901 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
902 regs->array[regno].single_usage))
904 /* Replace any usage in a REG_EQUAL note. Must copy the
905 new source, so that we don't get rtx sharing between the
906 SET_SOURCE and REG_NOTES of insn p. */
907 REG_NOTES (regs->array[regno].single_usage)
908 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
909 SET_DEST (set), copy_rtx (SET_SRC (set)));
911 delete_insn (p);
912 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
913 regs->array[regno+i].set_in_loop = 0;
914 continue;
917 m = (struct movable *) xmalloc (sizeof (struct movable));
918 m->next = 0;
919 m->insn = p;
920 m->set_src = src;
921 m->dependencies = dependencies;
922 m->set_dest = SET_DEST (set);
923 m->force = 0;
924 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
925 m->done = 0;
926 m->forces = 0;
927 m->partial = 0;
928 m->move_insn = move_insn;
929 m->move_insn_first = 0;
930 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
931 m->savemode = VOIDmode;
932 m->regno = regno;
933 /* Set M->cond if either loop_invariant_p
934 or consec_sets_invariant_p returned 2
935 (only conditionally invariant). */
936 m->cond = ((tem | tem1 | tem2) > 1);
937 m->global = LOOP_REG_GLOBAL_P (loop, regno);
938 m->match = 0;
939 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
940 m->savings = regs->array[regno].n_times_set;
941 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
942 m->savings += libcall_benefit (p);
943 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
944 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
945 /* Add M to the end of the chain MOVABLES. */
946 loop_movables_add (movables, m);
948 if (m->consec > 0)
950 /* It is possible for the first instruction to have a
951 REG_EQUAL note but a non-invariant SET_SRC, so we must
952 remember the status of the first instruction in case
953 the last instruction doesn't have a REG_EQUAL note. */
954 m->move_insn_first = m->move_insn;
956 /* Skip this insn, not checking REG_LIBCALL notes. */
957 p = next_nonnote_insn (p);
958 /* Skip the consecutive insns, if there are any. */
959 p = skip_consec_insns (p, m->consec);
960 /* Back up to the last insn of the consecutive group. */
961 p = prev_nonnote_insn (p);
963 /* We must now reset m->move_insn, m->is_equiv, and possibly
964 m->set_src to correspond to the effects of all the
965 insns. */
966 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
967 if (temp)
968 m->set_src = XEXP (temp, 0), m->move_insn = 1;
969 else
971 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
972 if (temp && CONSTANT_P (XEXP (temp, 0)))
973 m->set_src = XEXP (temp, 0), m->move_insn = 1;
974 else
975 m->move_insn = 0;
978 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
981 /* If this register is always set within a STRICT_LOW_PART
982 or set to zero, then its high bytes are constant.
983 So clear them outside the loop and within the loop
984 just load the low bytes.
985 We must check that the machine has an instruction to do so.
986 Also, if the value loaded into the register
987 depends on the same register, this cannot be done. */
988 else if (SET_SRC (set) == const0_rtx
989 && GET_CODE (NEXT_INSN (p)) == INSN
990 && (set1 = single_set (NEXT_INSN (p)))
991 && GET_CODE (set1) == SET
992 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
993 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
994 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
995 == SET_DEST (set))
996 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
998 int regno = REGNO (SET_DEST (set));
999 if (regs->array[regno].set_in_loop == 2)
1001 struct movable *m;
1002 m = (struct movable *) xmalloc (sizeof (struct movable));
1003 m->next = 0;
1004 m->insn = p;
1005 m->set_dest = SET_DEST (set);
1006 m->dependencies = 0;
1007 m->force = 0;
1008 m->consec = 0;
1009 m->done = 0;
1010 m->forces = 0;
1011 m->move_insn = 0;
1012 m->move_insn_first = 0;
1013 m->partial = 1;
1014 /* If the insn may not be executed on some cycles,
1015 we can't clear the whole reg; clear just high part.
1016 Not even if the reg is used only within this loop.
1017 Consider this:
1018 while (1)
1019 while (s != t) {
1020 if (foo ()) x = *s;
1021 use (x);
1023 Clearing x before the inner loop could clobber a value
1024 being saved from the last time around the outer loop.
1025 However, if the reg is not used outside this loop
1026 and all uses of the register are in the same
1027 basic block as the store, there is no problem.
1029 If this insn was made by loop, we don't know its
1030 INSN_LUID and hence must make a conservative
1031 assumption. */
1032 m->global = (INSN_UID (p) >= max_uid_for_loop
1033 || LOOP_REG_GLOBAL_P (loop, regno)
1034 || (labels_in_range_p
1035 (p, REGNO_FIRST_LUID (regno))));
1036 if (maybe_never && m->global)
1037 m->savemode = GET_MODE (SET_SRC (set1));
1038 else
1039 m->savemode = VOIDmode;
1040 m->regno = regno;
1041 m->cond = 0;
1042 m->match = 0;
1043 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1044 m->savings = 1;
1045 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1046 regs->array[regno+i].set_in_loop = -1;
1047 /* Add M to the end of the chain MOVABLES. */
1048 loop_movables_add (movables, m);
1052 /* Past a call insn, we get to insns which might not be executed
1053 because the call might exit. This matters for insns that trap.
1054 Constant and pure call insns always return, so they don't count. */
1055 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1056 call_passed = 1;
1057 /* Past a label or a jump, we get to insns for which we
1058 can't count on whether or how many times they will be
1059 executed during each iteration. Therefore, we can
1060 only move out sets of trivial variables
1061 (those not used after the loop). */
1062 /* Similar code appears twice in strength_reduce. */
1063 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1064 /* If we enter the loop in the middle, and scan around to the
1065 beginning, don't set maybe_never for that. This must be an
1066 unconditional jump, otherwise the code at the top of the
1067 loop might never be executed. Unconditional jumps are
1068 followed by a barrier then the loop_end. */
1069 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1070 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1071 && any_uncondjump_p (p)))
1072 maybe_never = 1;
1073 else if (GET_CODE (p) == NOTE)
1075 /* At the virtual top of a converted loop, insns are again known to
1076 be executed: logically, the loop begins here even though the exit
1077 code has been duplicated. */
1078 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1079 maybe_never = call_passed = 0;
1080 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1081 loop_depth++;
1082 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1083 loop_depth--;
1087 /* If one movable subsumes another, ignore that other. */
1089 ignore_some_movables (movables);
1091 /* For each movable insn, see if the reg that it loads
1092 leads when it dies right into another conditionally movable insn.
1093 If so, record that the second insn "forces" the first one,
1094 since the second can be moved only if the first is. */
1096 force_movables (movables);
1098 /* See if there are multiple movable insns that load the same value.
1099 If there are, make all but the first point at the first one
1100 through the `match' field, and add the priorities of them
1101 all together as the priority of the first. */
1103 combine_movables (movables, regs);
1105 /* Now consider each movable insn to decide whether it is worth moving.
1106 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1108 Generally this increases code size, so do not move movables when
1109 optimizing for code size. */
1111 if (! optimize_size)
1113 move_movables (loop, movables, threshold, insn_count);
1115 /* Recalculate regs->array if move_movables has created new
1116 registers. */
1117 if (max_reg_num () > regs->num)
1119 loop_regs_scan (loop, 0);
1120 for (update_start = loop_start;
1121 PREV_INSN (update_start)
1122 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1123 update_start = PREV_INSN (update_start))
1125 update_end = NEXT_INSN (loop_end);
1127 reg_scan_update (update_start, update_end, loop_max_reg);
1128 loop_max_reg = max_reg_num ();
1132 /* Now candidates that still are negative are those not moved.
1133 Change regs->array[I].set_in_loop to indicate that those are not actually
1134 invariant. */
1135 for (i = 0; i < regs->num; i++)
1136 if (regs->array[i].set_in_loop < 0)
1137 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1139 /* Now that we've moved some things out of the loop, we might be able to
1140 hoist even more memory references. */
1141 load_mems (loop);
1143 /* Recalculate regs->array if load_mems has created new registers. */
1144 if (max_reg_num () > regs->num)
1145 loop_regs_scan (loop, 0);
1147 for (update_start = loop_start;
1148 PREV_INSN (update_start)
1149 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1150 update_start = PREV_INSN (update_start))
1152 update_end = NEXT_INSN (loop_end);
1154 reg_scan_update (update_start, update_end, loop_max_reg);
1155 loop_max_reg = max_reg_num ();
1157 if (flag_strength_reduce)
1159 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1160 /* Ensure our label doesn't go away. */
1161 LABEL_NUSES (update_end)++;
1163 strength_reduce (loop, flags);
1165 reg_scan_update (update_start, update_end, loop_max_reg);
1166 loop_max_reg = max_reg_num ();
1168 if (update_end && GET_CODE (update_end) == CODE_LABEL
1169 && --LABEL_NUSES (update_end) == 0)
1170 delete_related_insns (update_end);
1174 /* The movable information is required for strength reduction. */
1175 loop_movables_free (movables);
1177 free (regs->array);
1178 regs->array = 0;
1179 regs->num = 0;
1182 /* Add elements to *OUTPUT to record all the pseudo-regs
1183 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1185 void
1186 record_excess_regs (in_this, not_in_this, output)
1187 rtx in_this, not_in_this;
1188 rtx *output;
1190 enum rtx_code code;
1191 const char *fmt;
1192 int i;
1194 code = GET_CODE (in_this);
1196 switch (code)
1198 case PC:
1199 case CC0:
1200 case CONST_INT:
1201 case CONST_DOUBLE:
1202 case CONST:
1203 case SYMBOL_REF:
1204 case LABEL_REF:
1205 return;
1207 case REG:
1208 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1209 && ! reg_mentioned_p (in_this, not_in_this))
1210 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1211 return;
1213 default:
1214 break;
1217 fmt = GET_RTX_FORMAT (code);
1218 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1220 int j;
1222 switch (fmt[i])
1224 case 'E':
1225 for (j = 0; j < XVECLEN (in_this, i); j++)
1226 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1227 break;
1229 case 'e':
1230 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1231 break;
1236 /* Check what regs are referred to in the libcall block ending with INSN,
1237 aside from those mentioned in the equivalent value.
1238 If there are none, return 0.
1239 If there are one or more, return an EXPR_LIST containing all of them. */
1241 rtx
1242 libcall_other_reg (insn, equiv)
1243 rtx insn, equiv;
1245 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1246 rtx p = XEXP (note, 0);
1247 rtx output = 0;
1249 /* First, find all the regs used in the libcall block
1250 that are not mentioned as inputs to the result. */
1252 while (p != insn)
1254 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1255 || GET_CODE (p) == CALL_INSN)
1256 record_excess_regs (PATTERN (p), equiv, &output);
1257 p = NEXT_INSN (p);
1260 return output;
1263 /* Return 1 if all uses of REG
1264 are between INSN and the end of the basic block. */
1266 static int
1267 reg_in_basic_block_p (insn, reg)
1268 rtx insn, reg;
1270 int regno = REGNO (reg);
1271 rtx p;
1273 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1274 return 0;
1276 /* Search this basic block for the already recorded last use of the reg. */
1277 for (p = insn; p; p = NEXT_INSN (p))
1279 switch (GET_CODE (p))
1281 case NOTE:
1282 break;
1284 case INSN:
1285 case CALL_INSN:
1286 /* Ordinary insn: if this is the last use, we win. */
1287 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1288 return 1;
1289 break;
1291 case JUMP_INSN:
1292 /* Jump insn: if this is the last use, we win. */
1293 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1294 return 1;
1295 /* Otherwise, it's the end of the basic block, so we lose. */
1296 return 0;
1298 case CODE_LABEL:
1299 case BARRIER:
1300 /* It's the end of the basic block, so we lose. */
1301 return 0;
1303 default:
1304 break;
1308 /* The "last use" that was recorded can't be found after the first
1309 use. This can happen when the last use was deleted while
1310 processing an inner loop, this inner loop was then completely
1311 unrolled, and the outer loop is always exited after the inner loop,
1312 so that everything after the first use becomes a single basic block. */
1313 return 1;
1316 /* Compute the benefit of eliminating the insns in the block whose
1317 last insn is LAST. This may be a group of insns used to compute a
1318 value directly or can contain a library call. */
1320 static int
1321 libcall_benefit (last)
1322 rtx last;
1324 rtx insn;
1325 int benefit = 0;
1327 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1328 insn != last; insn = NEXT_INSN (insn))
1330 if (GET_CODE (insn) == CALL_INSN)
1331 benefit += 10; /* Assume at least this many insns in a library
1332 routine. */
1333 else if (GET_CODE (insn) == INSN
1334 && GET_CODE (PATTERN (insn)) != USE
1335 && GET_CODE (PATTERN (insn)) != CLOBBER)
1336 benefit++;
1339 return benefit;
1342 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1344 static rtx
1345 skip_consec_insns (insn, count)
1346 rtx insn;
1347 int count;
1349 for (; count > 0; count--)
1351 rtx temp;
1353 /* If first insn of libcall sequence, skip to end. */
1354 /* Do this at start of loop, since INSN is guaranteed to
1355 be an insn here. */
1356 if (GET_CODE (insn) != NOTE
1357 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1358 insn = XEXP (temp, 0);
1361 insn = NEXT_INSN (insn);
1362 while (GET_CODE (insn) == NOTE);
1365 return insn;
1368 /* Ignore any movable whose insn falls within a libcall
1369 which is part of another movable.
1370 We make use of the fact that the movable for the libcall value
1371 was made later and so appears later on the chain. */
1373 static void
1374 ignore_some_movables (movables)
1375 struct loop_movables *movables;
1377 struct movable *m, *m1;
1379 for (m = movables->head; m; m = m->next)
1381 /* Is this a movable for the value of a libcall? */
1382 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1383 if (note)
1385 rtx insn;
1386 /* Check for earlier movables inside that range,
1387 and mark them invalid. We cannot use LUIDs here because
1388 insns created by loop.c for prior loops don't have LUIDs.
1389 Rather than reject all such insns from movables, we just
1390 explicitly check each insn in the libcall (since invariant
1391 libcalls aren't that common). */
1392 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1393 for (m1 = movables->head; m1 != m; m1 = m1->next)
1394 if (m1->insn == insn)
1395 m1->done = 1;
1400 /* For each movable insn, see if the reg that it loads
1401 leads when it dies right into another conditionally movable insn.
1402 If so, record that the second insn "forces" the first one,
1403 since the second can be moved only if the first is. */
1405 static void
1406 force_movables (movables)
1407 struct loop_movables *movables;
1409 struct movable *m, *m1;
1411 for (m1 = movables->head; m1; m1 = m1->next)
1412 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1413 if (!m1->partial && !m1->done)
1415 int regno = m1->regno;
1416 for (m = m1->next; m; m = m->next)
1417 /* ??? Could this be a bug? What if CSE caused the
1418 register of M1 to be used after this insn?
1419 Since CSE does not update regno_last_uid,
1420 this insn M->insn might not be where it dies.
1421 But very likely this doesn't matter; what matters is
1422 that M's reg is computed from M1's reg. */
1423 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1424 && !m->done)
1425 break;
1426 if (m != 0 && m->set_src == m1->set_dest
1427 /* If m->consec, m->set_src isn't valid. */
1428 && m->consec == 0)
1429 m = 0;
1431 /* Increase the priority of the moving the first insn
1432 since it permits the second to be moved as well. */
1433 if (m != 0)
1435 m->forces = m1;
1436 m1->lifetime += m->lifetime;
1437 m1->savings += m->savings;
1442 /* Find invariant expressions that are equal and can be combined into
1443 one register. */
1445 static void
1446 combine_movables (movables, regs)
1447 struct loop_movables *movables;
1448 struct loop_regs *regs;
1450 struct movable *m;
1451 char *matched_regs = (char *) xmalloc (regs->num);
1452 enum machine_mode mode;
1454 /* Regs that are set more than once are not allowed to match
1455 or be matched. I'm no longer sure why not. */
1456 /* Only pseudo registers are allowed to match or be matched,
1457 since move_movables does not validate the change. */
1458 /* Perhaps testing m->consec_sets would be more appropriate here? */
1460 for (m = movables->head; m; m = m->next)
1461 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1462 && m->regno >= FIRST_PSEUDO_REGISTER
1463 && !m->partial)
1465 struct movable *m1;
1466 int regno = m->regno;
1468 memset (matched_regs, 0, regs->num);
1469 matched_regs[regno] = 1;
1471 /* We want later insns to match the first one. Don't make the first
1472 one match any later ones. So start this loop at m->next. */
1473 for (m1 = m->next; m1; m1 = m1->next)
1474 if (m != m1 && m1->match == 0
1475 && regs->array[m1->regno].n_times_set == 1
1476 && m1->regno >= FIRST_PSEUDO_REGISTER
1477 /* A reg used outside the loop mustn't be eliminated. */
1478 && !m1->global
1479 /* A reg used for zero-extending mustn't be eliminated. */
1480 && !m1->partial
1481 && (matched_regs[m1->regno]
1484 /* Can combine regs with different modes loaded from the
1485 same constant only if the modes are the same or
1486 if both are integer modes with M wider or the same
1487 width as M1. The check for integer is redundant, but
1488 safe, since the only case of differing destination
1489 modes with equal sources is when both sources are
1490 VOIDmode, i.e., CONST_INT. */
1491 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1492 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1493 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1494 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1495 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1496 /* See if the source of M1 says it matches M. */
1497 && ((GET_CODE (m1->set_src) == REG
1498 && matched_regs[REGNO (m1->set_src)])
1499 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1500 movables, regs))))
1501 && ((m->dependencies == m1->dependencies)
1502 || rtx_equal_p (m->dependencies, m1->dependencies)))
1504 m->lifetime += m1->lifetime;
1505 m->savings += m1->savings;
1506 m1->done = 1;
1507 m1->match = m;
1508 matched_regs[m1->regno] = 1;
1512 /* Now combine the regs used for zero-extension.
1513 This can be done for those not marked `global'
1514 provided their lives don't overlap. */
1516 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1517 mode = GET_MODE_WIDER_MODE (mode))
1519 struct movable *m0 = 0;
1521 /* Combine all the registers for extension from mode MODE.
1522 Don't combine any that are used outside this loop. */
1523 for (m = movables->head; m; m = m->next)
1524 if (m->partial && ! m->global
1525 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1527 struct movable *m1;
1529 int first = REGNO_FIRST_LUID (m->regno);
1530 int last = REGNO_LAST_LUID (m->regno);
1532 if (m0 == 0)
1534 /* First one: don't check for overlap, just record it. */
1535 m0 = m;
1536 continue;
1539 /* Make sure they extend to the same mode.
1540 (Almost always true.) */
1541 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1542 continue;
1544 /* We already have one: check for overlap with those
1545 already combined together. */
1546 for (m1 = movables->head; m1 != m; m1 = m1->next)
1547 if (m1 == m0 || (m1->partial && m1->match == m0))
1548 if (! (REGNO_FIRST_LUID (m1->regno) > last
1549 || REGNO_LAST_LUID (m1->regno) < first))
1550 goto overlap;
1552 /* No overlap: we can combine this with the others. */
1553 m0->lifetime += m->lifetime;
1554 m0->savings += m->savings;
1555 m->done = 1;
1556 m->match = m0;
1558 overlap:
1563 /* Clean up. */
1564 free (matched_regs);
1567 /* Returns the number of movable instructions in LOOP that were not
1568 moved outside the loop. */
1570 static int
1571 num_unmoved_movables (loop)
1572 const struct loop *loop;
1574 int num = 0;
1575 struct movable *m;
1577 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1578 if (!m->done)
1579 ++num;
1581 return num;
1585 /* Return 1 if regs X and Y will become the same if moved. */
1587 static int
1588 regs_match_p (x, y, movables)
1589 rtx x, y;
1590 struct loop_movables *movables;
1592 unsigned int xn = REGNO (x);
1593 unsigned int yn = REGNO (y);
1594 struct movable *mx, *my;
1596 for (mx = movables->head; mx; mx = mx->next)
1597 if (mx->regno == xn)
1598 break;
1600 for (my = movables->head; my; my = my->next)
1601 if (my->regno == yn)
1602 break;
1604 return (mx && my
1605 && ((mx->match == my->match && mx->match != 0)
1606 || mx->match == my
1607 || mx == my->match));
1610 /* Return 1 if X and Y are identical-looking rtx's.
1611 This is the Lisp function EQUAL for rtx arguments.
1613 If two registers are matching movables or a movable register and an
1614 equivalent constant, consider them equal. */
1616 static int
1617 rtx_equal_for_loop_p (x, y, movables, regs)
1618 rtx x, y;
1619 struct loop_movables *movables;
1620 struct loop_regs *regs;
1622 int i;
1623 int j;
1624 struct movable *m;
1625 enum rtx_code code;
1626 const char *fmt;
1628 if (x == y)
1629 return 1;
1630 if (x == 0 || y == 0)
1631 return 0;
1633 code = GET_CODE (x);
1635 /* If we have a register and a constant, they may sometimes be
1636 equal. */
1637 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1638 && CONSTANT_P (y))
1640 for (m = movables->head; m; m = m->next)
1641 if (m->move_insn && m->regno == REGNO (x)
1642 && rtx_equal_p (m->set_src, y))
1643 return 1;
1645 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1646 && CONSTANT_P (x))
1648 for (m = movables->head; m; m = m->next)
1649 if (m->move_insn && m->regno == REGNO (y)
1650 && rtx_equal_p (m->set_src, x))
1651 return 1;
1654 /* Otherwise, rtx's of different codes cannot be equal. */
1655 if (code != GET_CODE (y))
1656 return 0;
1658 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1659 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1661 if (GET_MODE (x) != GET_MODE (y))
1662 return 0;
1664 /* These three types of rtx's can be compared nonrecursively. */
1665 if (code == REG)
1666 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1668 if (code == LABEL_REF)
1669 return XEXP (x, 0) == XEXP (y, 0);
1670 if (code == SYMBOL_REF)
1671 return XSTR (x, 0) == XSTR (y, 0);
1673 /* Compare the elements. If any pair of corresponding elements
1674 fails to match, return 0 for the whole thing.
1676 fmt = GET_RTX_FORMAT (code);
1677 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1679 switch (fmt[i])
1681 case 'w':
1682 if (XWINT (x, i) != XWINT (y, i))
1683 return 0;
1684 break;
1686 case 'i':
1687 if (XINT (x, i) != XINT (y, i))
1688 return 0;
1689 break;
1691 case 'E':
1692 /* Two vectors must have the same length. */
1693 if (XVECLEN (x, i) != XVECLEN (y, i))
1694 return 0;
1696 /* And the corresponding elements must match. */
1697 for (j = 0; j < XVECLEN (x, i); j++)
1698 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1699 movables, regs) == 0)
1700 return 0;
1701 break;
1703 case 'e':
1704 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1705 == 0)
1706 return 0;
1707 break;
1709 case 's':
1710 if (strcmp (XSTR (x, i), XSTR (y, i)))
1711 return 0;
1712 break;
1714 case 'u':
1715 /* These are just backpointers, so they don't matter. */
1716 break;
1718 case '0':
1719 break;
1721 /* It is believed that rtx's at this level will never
1722 contain anything but integers and other rtx's,
1723 except for within LABEL_REFs and SYMBOL_REFs. */
1724 default:
1725 abort ();
1728 return 1;
1731 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1732 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1733 references is incremented once for each added note. */
1735 static void
1736 add_label_notes (x, insns)
1737 rtx x;
1738 rtx insns;
1740 enum rtx_code code = GET_CODE (x);
1741 int i, j;
1742 const char *fmt;
1743 rtx insn;
1745 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1747 /* This code used to ignore labels that referred to dispatch tables to
1748 avoid flow generating (slightly) worse code.
1750 We no longer ignore such label references (see LABEL_REF handling in
1751 mark_jump_label for additional information). */
1752 for (insn = insns; insn; insn = NEXT_INSN (insn))
1753 if (reg_mentioned_p (XEXP (x, 0), insn))
1755 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1756 REG_NOTES (insn));
1757 if (LABEL_P (XEXP (x, 0)))
1758 LABEL_NUSES (XEXP (x, 0))++;
1762 fmt = GET_RTX_FORMAT (code);
1763 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1765 if (fmt[i] == 'e')
1766 add_label_notes (XEXP (x, i), insns);
1767 else if (fmt[i] == 'E')
1768 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1769 add_label_notes (XVECEXP (x, i, j), insns);
1773 /* Scan MOVABLES, and move the insns that deserve to be moved.
1774 If two matching movables are combined, replace one reg with the
1775 other throughout. */
1777 static void
1778 move_movables (loop, movables, threshold, insn_count)
1779 struct loop *loop;
1780 struct loop_movables *movables;
1781 int threshold;
1782 int insn_count;
1784 struct loop_regs *regs = LOOP_REGS (loop);
1785 int nregs = regs->num;
1786 rtx new_start = 0;
1787 struct movable *m;
1788 rtx p;
1789 rtx loop_start = loop->start;
1790 rtx loop_end = loop->end;
1791 /* Map of pseudo-register replacements to handle combining
1792 when we move several insns that load the same value
1793 into different pseudo-registers. */
1794 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1795 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1797 for (m = movables->head; m; m = m->next)
1799 /* Describe this movable insn. */
1801 if (loop_dump_stream)
1803 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1804 INSN_UID (m->insn), m->regno, m->lifetime);
1805 if (m->consec > 0)
1806 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1807 if (m->cond)
1808 fprintf (loop_dump_stream, "cond ");
1809 if (m->force)
1810 fprintf (loop_dump_stream, "force ");
1811 if (m->global)
1812 fprintf (loop_dump_stream, "global ");
1813 if (m->done)
1814 fprintf (loop_dump_stream, "done ");
1815 if (m->move_insn)
1816 fprintf (loop_dump_stream, "move-insn ");
1817 if (m->match)
1818 fprintf (loop_dump_stream, "matches %d ",
1819 INSN_UID (m->match->insn));
1820 if (m->forces)
1821 fprintf (loop_dump_stream, "forces %d ",
1822 INSN_UID (m->forces->insn));
1825 /* Ignore the insn if it's already done (it matched something else).
1826 Otherwise, see if it is now safe to move. */
1828 if (!m->done
1829 && (! m->cond
1830 || (1 == loop_invariant_p (loop, m->set_src)
1831 && (m->dependencies == 0
1832 || 1 == loop_invariant_p (loop, m->dependencies))
1833 && (m->consec == 0
1834 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1835 m->consec + 1,
1836 m->insn))))
1837 && (! m->forces || m->forces->done))
1839 int regno;
1840 rtx p;
1841 int savings = m->savings;
1843 /* We have an insn that is safe to move.
1844 Compute its desirability. */
1846 p = m->insn;
1847 regno = m->regno;
1849 if (loop_dump_stream)
1850 fprintf (loop_dump_stream, "savings %d ", savings);
1852 if (regs->array[regno].moved_once && loop_dump_stream)
1853 fprintf (loop_dump_stream, "halved since already moved ");
1855 /* An insn MUST be moved if we already moved something else
1856 which is safe only if this one is moved too: that is,
1857 if already_moved[REGNO] is nonzero. */
1859 /* An insn is desirable to move if the new lifetime of the
1860 register is no more than THRESHOLD times the old lifetime.
1861 If it's not desirable, it means the loop is so big
1862 that moving won't speed things up much,
1863 and it is liable to make register usage worse. */
1865 /* It is also desirable to move if it can be moved at no
1866 extra cost because something else was already moved. */
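/* A worked example with made-up numbers: with THRESHOLD 6, SAVINGS 2
   and LIFETIME 10, the product 6 * 2 * 10 = 120 is compared against
   INSN_COUNT (doubled when the reg was already moved out of another
   loop), so a 100-insn loop accepts the move while a 300-insn loop
   rejects it.  */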
1868 if (already_moved[regno]
1869 || flag_move_all_movables
1870 || (threshold * savings * m->lifetime) >=
1871 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1872 || (m->forces && m->forces->done
1873 && regs->array[m->forces->regno].n_times_set == 1))
1875 int count;
1876 struct movable *m1;
1877 rtx first = NULL_RTX;
1879 /* Now move the insns that set the reg. */
1881 if (m->partial && m->match)
1883 rtx newpat, i1;
1884 rtx r1, r2;
1885 /* Find the end of this chain of matching regs.
1886 Thus, we load each reg in the chain from that one reg.
1887 And that reg is loaded with 0 directly,
1888 since it has ->match == 0. */
1889 for (m1 = m; m1->match; m1 = m1->match);
1890 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1891 SET_DEST (PATTERN (m1->insn)));
1892 i1 = loop_insn_hoist (loop, newpat);
1894 /* Mark the moved, invariant reg as being allowed to
1895 share a hard reg with the other matching invariant. */
1896 REG_NOTES (i1) = REG_NOTES (m->insn);
1897 r1 = SET_DEST (PATTERN (m->insn));
1898 r2 = SET_DEST (PATTERN (m1->insn));
1899 regs_may_share
1900 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1901 gen_rtx_EXPR_LIST (VOIDmode, r2,
1902 regs_may_share));
1903 delete_insn (m->insn);
1905 if (new_start == 0)
1906 new_start = i1;
1908 if (loop_dump_stream)
1909 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1911 /* If we are to re-generate the item being moved with a
1912 new move insn, first delete what we have and then emit
1913 the move insn before the loop. */
1914 else if (m->move_insn)
1916 rtx i1, temp, seq;
1918 for (count = m->consec; count >= 0; count--)
1920 /* If this is the first insn of a library call sequence,
1921 skip to the end. */
1922 if (GET_CODE (p) != NOTE
1923 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1924 p = XEXP (temp, 0);
1926 /* If this is the last insn of a libcall sequence, then
1927 delete every insn in the sequence except the last.
1928 The last insn is handled in the normal manner. */
1929 if (GET_CODE (p) != NOTE
1930 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1932 temp = XEXP (temp, 0);
1933 while (temp != p)
1934 temp = delete_insn (temp);
1937 temp = p;
1938 p = delete_insn (p);
1940 /* simplify_giv_expr expects that it can walk the insns
1941 at m->insn forwards and see this old sequence we are
1942 tossing here. delete_insn does preserve the next
1943 pointers, but when we skip over a NOTE we must fix
1944 it up. Otherwise that code walks into the non-deleted
1945 insn stream. */
1946 while (p && GET_CODE (p) == NOTE)
1947 p = NEXT_INSN (temp) = NEXT_INSN (p);
1950 start_sequence ();
1951 emit_move_insn (m->set_dest, m->set_src);
1952 seq = get_insns ();
1953 end_sequence ();
1955 add_label_notes (m->set_src, seq);
1957 i1 = loop_insn_hoist (loop, seq);
1958 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1959 set_unique_reg_note (i1,
1960 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1961 m->set_src);
1963 if (loop_dump_stream)
1964 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1966 /* The more regs we move, the less we like moving them. */
1967 threshold -= 3;
1969 else
1971 for (count = m->consec; count >= 0; count--)
1973 rtx i1, temp;
1975 /* If first insn of libcall sequence, skip to end. */
1976 /* Do this at start of loop, since p is guaranteed to
1977 be an insn here. */
1978 if (GET_CODE (p) != NOTE
1979 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1980 p = XEXP (temp, 0);
1982 /* If last insn of libcall sequence, move all
1983 insns except the last before the loop. The last
1984 insn is handled in the normal manner. */
1985 if (GET_CODE (p) != NOTE
1986 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1988 rtx fn_address = 0;
1989 rtx fn_reg = 0;
1990 rtx fn_address_insn = 0;
1992 first = 0;
1993 for (temp = XEXP (temp, 0); temp != p;
1994 temp = NEXT_INSN (temp))
1996 rtx body;
1997 rtx n;
1998 rtx next;
2000 if (GET_CODE (temp) == NOTE)
2001 continue;
2003 body = PATTERN (temp);
2005 /* Find the next insn after TEMP,
2006 not counting USE or NOTE insns. */
2007 for (next = NEXT_INSN (temp); next != p;
2008 next = NEXT_INSN (next))
2009 if (! (GET_CODE (next) == INSN
2010 && GET_CODE (PATTERN (next)) == USE)
2011 && GET_CODE (next) != NOTE)
2012 break;
2014 /* If that is the call, this may be the insn
2015 that loads the function address.
2017 Extract the function address from the insn
2018 that loads it into a register.
2019 If this insn was cse'd, we get incorrect code.
2021 So emit a new move insn that copies the
2022 function address into the register that the
2023 call insn will use. flow.c will delete any
2024 redundant stores that we have created. */
2025 if (GET_CODE (next) == CALL_INSN
2026 && GET_CODE (body) == SET
2027 && GET_CODE (SET_DEST (body)) == REG
2028 && (n = find_reg_note (temp, REG_EQUAL,
2029 NULL_RTX)))
2031 fn_reg = SET_SRC (body);
2032 if (GET_CODE (fn_reg) != REG)
2033 fn_reg = SET_DEST (body);
2034 fn_address = XEXP (n, 0);
2035 fn_address_insn = temp;
2037 /* We have the call insn.
2038 If it uses the register we suspect it might,
2039 load it with the correct address directly. */
2040 if (GET_CODE (temp) == CALL_INSN
2041 && fn_address != 0
2042 && reg_referenced_p (fn_reg, body))
2043 loop_insn_emit_after (loop, 0, fn_address_insn,
2044 gen_move_insn
2045 (fn_reg, fn_address));
2047 if (GET_CODE (temp) == CALL_INSN)
2049 i1 = loop_call_insn_hoist (loop, body);
2050 /* Because the USAGE information potentially
2051 contains objects other than hard registers
2052 we need to copy it. */
2053 if (CALL_INSN_FUNCTION_USAGE (temp))
2054 CALL_INSN_FUNCTION_USAGE (i1)
2055 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2057 else
2058 i1 = loop_insn_hoist (loop, body);
2059 if (first == 0)
2060 first = i1;
2061 if (temp == fn_address_insn)
2062 fn_address_insn = i1;
2063 REG_NOTES (i1) = REG_NOTES (temp);
2064 REG_NOTES (temp) = NULL;
2065 delete_insn (temp);
2067 if (new_start == 0)
2068 new_start = first;
2070 if (m->savemode != VOIDmode)
2072 /* P sets REG to zero; but we should clear only
2073 the bits that are not covered by the mode
2074 m->savemode. */
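/* For instance, if m->savemode were QImode (8 bits), the mask built
   below is (1 << 8) - 1 = 0xff, so the hoisted insn computes
   reg &= 0xff, clearing only the bits outside the saved mode.  */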
2075 rtx reg = m->set_dest;
2076 rtx sequence;
2077 rtx tem;
2079 start_sequence ();
2080 tem = expand_simple_binop
2081 (GET_MODE (reg), AND, reg,
2082 GEN_INT ((((HOST_WIDE_INT) 1
2083 << GET_MODE_BITSIZE (m->savemode)))
2084 - 1),
2085 reg, 1, OPTAB_LIB_WIDEN);
2086 if (tem == 0)
2087 abort ();
2088 if (tem != reg)
2089 emit_move_insn (reg, tem);
2090 sequence = get_insns ();
2091 end_sequence ();
2092 i1 = loop_insn_hoist (loop, sequence);
2094 else if (GET_CODE (p) == CALL_INSN)
2096 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2097 /* Because the USAGE information potentially
2098 contains objects other than hard registers
2099 we need to copy it. */
2100 if (CALL_INSN_FUNCTION_USAGE (p))
2101 CALL_INSN_FUNCTION_USAGE (i1)
2102 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2104 else if (count == m->consec && m->move_insn_first)
2106 rtx seq;
2107 /* The SET_SRC might not be invariant, so we must
2108 use the REG_EQUAL note. */
2109 start_sequence ();
2110 emit_move_insn (m->set_dest, m->set_src);
2111 seq = get_insns ();
2112 end_sequence ();
2114 add_label_notes (m->set_src, seq);
2116 i1 = loop_insn_hoist (loop, seq);
2117 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2118 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2119 : REG_EQUAL, m->set_src);
2121 else
2122 i1 = loop_insn_hoist (loop, PATTERN (p));
2124 if (REG_NOTES (i1) == 0)
2126 REG_NOTES (i1) = REG_NOTES (p);
2127 REG_NOTES (p) = NULL;
2129 /* If there is a REG_EQUAL note present whose value
2130 is not loop invariant, then delete it, since it
2131 may cause problems with later optimization passes.
2132 It is possible for cse to create such notes
2133 as a result of record_jump_cond.  */
2135 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2136 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2137 remove_note (i1, temp);
2140 if (new_start == 0)
2141 new_start = i1;
2143 if (loop_dump_stream)
2144 fprintf (loop_dump_stream, " moved to %d",
2145 INSN_UID (i1));
2147 /* If library call, now fix the REG_NOTES that contain
2148 insn pointers, namely REG_LIBCALL on FIRST
2149 and REG_RETVAL on I1. */
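/* The REG_LIBCALL note on the first insn of a libcall sequence points
   at the last insn of the sequence, and the REG_RETVAL note on the
   last insn points back at the first, so both pointers must now be
   redirected to the hoisted copies.  */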
2150 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2152 XEXP (temp, 0) = first;
2153 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2154 XEXP (temp, 0) = i1;
2157 temp = p;
2158 delete_insn (p);
2159 p = NEXT_INSN (p);
2161 /* simplify_giv_expr expects that it can walk the insns
2162 at m->insn forwards and see this old sequence we are
2163 tossing here. delete_insn does preserve the next
2164 pointers, but when we skip over a NOTE we must fix
2165 it up. Otherwise that code walks into the non-deleted
2166 insn stream. */
2167 while (p && GET_CODE (p) == NOTE)
2168 p = NEXT_INSN (temp) = NEXT_INSN (p);
2171 /* The more regs we move, the less we like moving them. */
2172 threshold -= 3;
2175 /* Any other movable that loads the same register
2176 MUST be moved. */
2177 already_moved[regno] = 1;
2179 /* This reg has been moved out of one loop. */
2180 regs->array[regno].moved_once = 1;
2182 /* The reg set here is now invariant. */
2183 if (! m->partial)
2185 int i;
2186 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2187 regs->array[regno+i].set_in_loop = 0;
2190 m->done = 1;
2192 /* Change the length-of-life info for the register
2193 to say it lives at least the full length of this loop.
2194 This will help guide optimizations in outer loops. */
2196 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2197 /* This is the old insn before all the moved insns.
2198 We can't use the moved insn because it is out of range
2199 in uid_luid. Only the old insns have luids. */
2200 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2201 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2202 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2204 /* Combine with this moved insn any other matching movables. */
2206 if (! m->partial)
2207 for (m1 = movables->head; m1; m1 = m1->next)
2208 if (m1->match == m)
2210 rtx temp;
2212 /* Schedule the reg loaded by M1
2213 for replacement so that it shares the reg of M.
2214 If the modes differ (only possible in restricted
2215 circumstances), make a SUBREG.
2217 Note this assumes that the target dependent files
2218 treat REG and SUBREG equally, including within
2219 GO_IF_LEGITIMATE_ADDRESS and in all the
2220 predicates since we never verify that replacing the
2221 original register with a SUBREG results in a
2222 recognizable insn. */
2223 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2224 reg_map[m1->regno] = m->set_dest;
2225 else
2226 reg_map[m1->regno]
2227 = gen_lowpart_common (GET_MODE (m1->set_dest),
2228 m->set_dest);
2230 /* Get rid of the matching insn
2231 and prevent further processing of it. */
2232 m1->done = 1;
2234 /* If library call, delete all insns. */
2235 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2236 NULL_RTX)))
2237 delete_insn_chain (XEXP (temp, 0), m1->insn);
2238 else
2239 delete_insn (m1->insn);
2241 /* Any other movable that loads the same register
2242 MUST be moved. */
2243 already_moved[m1->regno] = 1;
2245 /* The reg merged here is now invariant,
2246 if the reg it matches is invariant. */
2247 if (! m->partial)
2249 int i;
2250 for (i = 0;
2251 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2252 i++)
2253 regs->array[m1->regno+i].set_in_loop = 0;
2257 else if (loop_dump_stream)
2258 fprintf (loop_dump_stream, "not desirable");
2260 else if (loop_dump_stream && !m->match)
2261 fprintf (loop_dump_stream, "not safe");
2263 if (loop_dump_stream)
2264 fprintf (loop_dump_stream, "\n");
2267 if (new_start == 0)
2268 new_start = loop_start;
2270 /* Go through all the instructions in the loop, making
2271 all the register substitutions scheduled in REG_MAP. */
2272 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2273 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2274 || GET_CODE (p) == CALL_INSN)
2276 replace_regs (PATTERN (p), reg_map, nregs, 0);
2277 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
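/* Resetting INSN_CODE marks the insn as not yet recognized, so
   pattern recognition is redone after the substitution.  */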
2278 INSN_CODE (p) = -1;
2281 /* Clean up. */
2282 free (reg_map);
2283 free (already_moved);
2287 static void
2288 loop_movables_add (movables, m)
2289 struct loop_movables *movables;
2290 struct movable *m;
2292 if (movables->head == 0)
2293 movables->head = m;
2294 else
2295 movables->last->next = m;
2296 movables->last = m;
2300 static void
2301 loop_movables_free (movables)
2302 struct loop_movables *movables;
2304 struct movable *m;
2305 struct movable *m_next;
2307 for (m = movables->head; m; m = m_next)
2309 m_next = m->next;
2310 free (m);
2314 #if 0
2315 /* Scan X and replace the address of any MEM in it with ADDR.
2316 REG is the address that MEM should have before the replacement. */
2318 static void
2319 replace_call_address (x, reg, addr)
2320 rtx x, reg, addr;
2322 enum rtx_code code;
2323 int i;
2324 const char *fmt;
2326 if (x == 0)
2327 return;
2328 code = GET_CODE (x);
2329 switch (code)
2331 case PC:
2332 case CC0:
2333 case CONST_INT:
2334 case CONST_DOUBLE:
2335 case CONST:
2336 case SYMBOL_REF:
2337 case LABEL_REF:
2338 case REG:
2339 return;
2341 case SET:
2342 /* Short cut for very common case. */
2343 replace_call_address (XEXP (x, 1), reg, addr);
2344 return;
2346 case CALL:
2347 /* Short cut for very common case. */
2348 replace_call_address (XEXP (x, 0), reg, addr);
2349 return;
2351 case MEM:
2352 /* If this MEM uses a reg other than the one we expected,
2353 something is wrong. */
2354 if (XEXP (x, 0) != reg)
2355 abort ();
2356 XEXP (x, 0) = addr;
2357 return;
2359 default:
2360 break;
2363 fmt = GET_RTX_FORMAT (code);
2364 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2366 if (fmt[i] == 'e')
2367 replace_call_address (XEXP (x, i), reg, addr);
2368 else if (fmt[i] == 'E')
2370 int j;
2371 for (j = 0; j < XVECLEN (x, i); j++)
2372 replace_call_address (XVECEXP (x, i, j), reg, addr);
2376 #endif
2378 /* Return the number of memory refs to addresses that vary
2379 in the rtx X. */
2381 static int
2382 count_nonfixed_reads (loop, x)
2383 const struct loop *loop;
2384 rtx x;
2386 enum rtx_code code;
2387 int i;
2388 const char *fmt;
2389 int value;
2391 if (x == 0)
2392 return 0;
2394 code = GET_CODE (x);
2395 switch (code)
2397 case PC:
2398 case CC0:
2399 case CONST_INT:
2400 case CONST_DOUBLE:
2401 case CONST:
2402 case SYMBOL_REF:
2403 case LABEL_REF:
2404 case REG:
2405 return 0;
2407 case MEM:
2408 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2409 + count_nonfixed_reads (loop, XEXP (x, 0)));
2411 default:
2412 break;
2415 value = 0;
2416 fmt = GET_RTX_FORMAT (code);
2417 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2419 if (fmt[i] == 'e')
2420 value += count_nonfixed_reads (loop, XEXP (x, i));
2421 if (fmt[i] == 'E')
2423 int j;
2424 for (j = 0; j < XVECLEN (x, i); j++)
2425 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2428 return value;
2431 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2432 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2433 `unknown_address_altered', `unknown_constant_address_altered', and
2434 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2435 list `store_mems' in LOOP. */
2437 static void
2438 prescan_loop (loop)
2439 struct loop *loop;
2441 int level = 1;
2442 rtx insn;
2443 struct loop_info *loop_info = LOOP_INFO (loop);
2444 rtx start = loop->start;
2445 rtx end = loop->end;
2446 /* The label after END. Jumping here is just like falling off the
2447 end of the loop. We use next_nonnote_insn instead of next_label
2448 as a hedge against the (pathological) case where some actual insn
2449 might end up between the two. */
2450 rtx exit_target = next_nonnote_insn (end);
2452 loop_info->has_indirect_jump = indirect_jump_in_function;
2453 loop_info->pre_header_has_call = 0;
2454 loop_info->has_call = 0;
2455 loop_info->has_nonconst_call = 0;
2456 loop_info->has_prefetch = 0;
2457 loop_info->has_volatile = 0;
2458 loop_info->has_tablejump = 0;
2459 loop_info->has_multiple_exit_targets = 0;
2460 loop->level = 1;
2462 loop_info->unknown_address_altered = 0;
2463 loop_info->unknown_constant_address_altered = 0;
2464 loop_info->store_mems = NULL_RTX;
2465 loop_info->first_loop_store_insn = NULL_RTX;
2466 loop_info->mems_idx = 0;
2467 loop_info->num_mem_sets = 0;
2470 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2471 insn = PREV_INSN (insn))
2473 if (GET_CODE (insn) == CALL_INSN)
2475 loop_info->pre_header_has_call = 1;
2476 break;
2480 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2481 insn = NEXT_INSN (insn))
2483 switch (GET_CODE (insn))
2485 case NOTE:
2486 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2488 ++level;
2489 /* Count number of loops contained in this one. */
2490 loop->level++;
2492 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2493 --level;
2494 break;
2496 case CALL_INSN:
2497 if (! CONST_OR_PURE_CALL_P (insn))
2499 loop_info->unknown_address_altered = 1;
2500 loop_info->has_nonconst_call = 1;
2502 else if (pure_call_p (insn))
2503 loop_info->has_nonconst_call = 1;
2504 loop_info->has_call = 1;
2505 if (can_throw_internal (insn))
2506 loop_info->has_multiple_exit_targets = 1;
2507 break;
2509 case JUMP_INSN:
2510 if (! loop_info->has_multiple_exit_targets)
2512 rtx set = pc_set (insn);
2514 if (set)
2516 rtx src = SET_SRC (set);
2517 rtx label1, label2;
2519 if (GET_CODE (src) == IF_THEN_ELSE)
2521 label1 = XEXP (src, 1);
2522 label2 = XEXP (src, 2);
2524 else
2526 label1 = src;
2527 label2 = NULL_RTX;
2532 if (label1 && label1 != pc_rtx)
2534 if (GET_CODE (label1) != LABEL_REF)
2536 /* Something tricky. */
2537 loop_info->has_multiple_exit_targets = 1;
2538 break;
2540 else if (XEXP (label1, 0) != exit_target
2541 && LABEL_OUTSIDE_LOOP_P (label1))
2543 /* A jump outside the current loop. */
2544 loop_info->has_multiple_exit_targets = 1;
2545 break;
2549 label1 = label2;
2550 label2 = NULL_RTX;
2552 while (label1);
2554 else
2556 /* A return, or something tricky. */
2557 loop_info->has_multiple_exit_targets = 1;
2560 /* FALLTHRU */
2562 case INSN:
2563 if (volatile_refs_p (PATTERN (insn)))
2564 loop_info->has_volatile = 1;
2566 if (GET_CODE (insn) == JUMP_INSN
2567 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2568 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2569 loop_info->has_tablejump = 1;
2571 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2572 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2573 loop_info->first_loop_store_insn = insn;
2575 if (flag_non_call_exceptions && can_throw_internal (insn))
2576 loop_info->has_multiple_exit_targets = 1;
2577 break;
2579 default:
2580 break;
2584 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2585 if (/* An exception thrown by a called function might land us
2586 anywhere. */
2587 ! loop_info->has_nonconst_call
2588 /* We don't want loads for MEMs moved to a location before the
2589 one at which their stack memory becomes allocated. (Note
2590 that this is not a problem for malloc, etc., since those
2591 require actual function calls.)  */
2592 && ! current_function_calls_alloca
2593 /* There are ways to leave the loop other than falling off the
2594 end. */
2595 && ! loop_info->has_multiple_exit_targets)
2596 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2597 insn = NEXT_INSN (insn))
2598 for_each_rtx (&insn, insert_loop_mem, loop_info);
2600 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2601 that loop_invariant_p and load_mems can use true_dependence
2602 to determine what is really clobbered. */
2603 if (loop_info->unknown_address_altered)
2605 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2607 loop_info->store_mems
2608 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2610 if (loop_info->unknown_constant_address_altered)
2612 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2614 RTX_UNCHANGING_P (mem) = 1;
2615 loop_info->store_mems
2616 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2620 /* Invalidate all loops containing LABEL. */
2622 static void
2623 invalidate_loops_containing_label (label)
2624 rtx label;
2626 struct loop *loop;
2627 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2628 loop->invalid = 1;
2631 /* Scan the function looking for loops. Record the start and end of each loop.
2632 Also mark as invalid loops any loops that contain a setjmp or are branched
2633 to from outside the loop. */
2635 static void
2636 find_and_verify_loops (f, loops)
2637 rtx f;
2638 struct loops *loops;
2640 rtx insn;
2641 rtx label;
2642 int num_loops;
2643 struct loop *current_loop;
2644 struct loop *next_loop;
2645 struct loop *loop;
2647 num_loops = loops->num;
2649 compute_luids (f, NULL_RTX, 0);
2651 /* If there are jumps to undefined labels,
2652 treat them as jumps out of any/all loops.
2653 This also avoids writing past end of tables when there are no loops. */
2654 uid_loop[0] = NULL;
2656 /* Find boundaries of loops, mark which loops are contained within
2657 loops, and invalidate loops that have setjmp. */
2659 num_loops = 0;
2660 current_loop = NULL;
2661 for (insn = f; insn; insn = NEXT_INSN (insn))
2663 if (GET_CODE (insn) == NOTE)
2664 switch (NOTE_LINE_NUMBER (insn))
2666 case NOTE_INSN_LOOP_BEG:
2667 next_loop = loops->array + num_loops;
2668 next_loop->num = num_loops;
2669 num_loops++;
2670 next_loop->start = insn;
2671 next_loop->outer = current_loop;
2672 current_loop = next_loop;
2673 break;
2675 case NOTE_INSN_LOOP_CONT:
2676 current_loop->cont = insn;
2677 break;
2679 case NOTE_INSN_LOOP_VTOP:
2680 current_loop->vtop = insn;
2681 break;
2683 case NOTE_INSN_LOOP_END:
2684 if (! current_loop)
2685 abort ();
2687 current_loop->end = insn;
2688 current_loop = current_loop->outer;
2689 break;
2691 default:
2692 break;
2695 if (GET_CODE (insn) == CALL_INSN
2696 && find_reg_note (insn, REG_SETJMP, NULL))
2698 /* In this case, we must invalidate our current loop and any
2699 enclosing loop. */
2700 for (loop = current_loop; loop; loop = loop->outer)
2702 loop->invalid = 1;
2703 if (loop_dump_stream)
2704 fprintf (loop_dump_stream,
2705 "\nLoop at %d ignored due to setjmp.\n",
2706 INSN_UID (loop->start));
2710 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2711 enclosing loop, but this doesn't matter. */
2712 uid_loop[INSN_UID (insn)] = current_loop;
2715 /* Any loop containing a label used in an initializer must be invalidated,
2716 because it can be jumped into from anywhere. */
2717 for (label = forced_labels; label; label = XEXP (label, 1))
2718 invalidate_loops_containing_label (XEXP (label, 0));
2720 /* Any loop containing a label used for an exception handler must be
2721 invalidated, because it can be jumped into from anywhere. */
2722 for_each_eh_label (invalidate_loops_containing_label);
2724 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2725 loop that it is not contained within, that loop is marked invalid.
2726 If any INSN or CALL_INSN uses a label's address, then the loop containing
2727 that label is marked invalid, because it could be jumped into from
2728 anywhere.
2730 Also look for blocks of code ending in an unconditional branch that
2731 exits the loop. If such a block is surrounded by a conditional
2732 branch around the block, move the block elsewhere (see below) and
2733 invert the jump to point to the code block. This may eliminate a
2734 label in our loop and will simplify processing by both us and a
2735 possible second cse pass. */
2737 for (insn = f; insn; insn = NEXT_INSN (insn))
2738 if (INSN_P (insn))
2740 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2742 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2744 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2745 if (note)
2746 invalidate_loops_containing_label (XEXP (note, 0));
2749 if (GET_CODE (insn) != JUMP_INSN)
2750 continue;
2752 mark_loop_jump (PATTERN (insn), this_loop);
2754 /* See if this is an unconditional branch outside the loop. */
2755 if (this_loop
2756 && (GET_CODE (PATTERN (insn)) == RETURN
2757 || (any_uncondjump_p (insn)
2758 && onlyjump_p (insn)
2759 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2760 != this_loop)))
2761 && get_max_uid () < max_uid_for_loop)
2763 rtx p;
2764 rtx our_next = next_real_insn (insn);
2765 rtx last_insn_to_move = NEXT_INSN (insn);
2766 struct loop *dest_loop;
2767 struct loop *outer_loop = NULL;
2769 /* Go backwards until we reach the start of the loop, a label,
2770 or a JUMP_INSN. */
2771 for (p = PREV_INSN (insn);
2772 GET_CODE (p) != CODE_LABEL
2773 && ! (GET_CODE (p) == NOTE
2774 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2775 && GET_CODE (p) != JUMP_INSN;
2776 p = PREV_INSN (p))
2779 /* Check for the case where we have a jump to an inner nested
2780 loop, and do not perform the optimization in that case. */
2782 if (JUMP_LABEL (insn))
2784 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2785 if (dest_loop)
2787 for (outer_loop = dest_loop; outer_loop;
2788 outer_loop = outer_loop->outer)
2789 if (outer_loop == this_loop)
2790 break;
2794 /* Make sure that the target of P is within the current loop. */
2796 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2797 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2798 outer_loop = this_loop;
2800 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2801 we have a block of code to try to move.
2803 We look backward and then forward from the target of INSN
2804 to find a BARRIER at the same loop depth as the target.
2805 If we find such a BARRIER, we make a new label for the start
2806 of the block, invert the jump in P and point it to that label,
2807 and move the block of code to the spot we found. */
2809 if (! outer_loop
2810 && GET_CODE (p) == JUMP_INSN
2811 && JUMP_LABEL (p) != 0
2812 /* Just ignore jumps to labels that were never emitted.
2813 These always indicate compilation errors. */
2814 && INSN_UID (JUMP_LABEL (p)) != 0
2815 && any_condjump_p (p) && onlyjump_p (p)
2816 && next_real_insn (JUMP_LABEL (p)) == our_next
2817 /* If it's not safe to move the sequence, then we
2818 mustn't try. */
2819 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2820 &last_insn_to_move))
2822 rtx target
2823 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2824 struct loop *target_loop = uid_loop[INSN_UID (target)];
2825 rtx loc, loc2;
2826 rtx tmp;
2828 /* Search for possible garbage past the conditional jumps
2829 and look for the last barrier. */
2830 for (tmp = last_insn_to_move;
2831 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2832 if (GET_CODE (tmp) == BARRIER)
2833 last_insn_to_move = tmp;
2835 for (loc = target; loc; loc = PREV_INSN (loc))
2836 if (GET_CODE (loc) == BARRIER
2837 /* Don't move things inside a tablejump. */
2838 && ((loc2 = next_nonnote_insn (loc)) == 0
2839 || GET_CODE (loc2) != CODE_LABEL
2840 || (loc2 = next_nonnote_insn (loc2)) == 0
2841 || GET_CODE (loc2) != JUMP_INSN
2842 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2843 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2844 && uid_loop[INSN_UID (loc)] == target_loop)
2845 break;
2847 if (loc == 0)
2848 for (loc = target; loc; loc = NEXT_INSN (loc))
2849 if (GET_CODE (loc) == BARRIER
2850 /* Don't move things inside a tablejump. */
2851 && ((loc2 = next_nonnote_insn (loc)) == 0
2852 || GET_CODE (loc2) != CODE_LABEL
2853 || (loc2 = next_nonnote_insn (loc2)) == 0
2854 || GET_CODE (loc2) != JUMP_INSN
2855 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2856 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2857 && uid_loop[INSN_UID (loc)] == target_loop)
2858 break;
2860 if (loc)
2862 rtx cond_label = JUMP_LABEL (p);
2863 rtx new_label = get_label_after (p);
2865 /* Ensure our label doesn't go away. */
2866 LABEL_NUSES (cond_label)++;
2868 /* Verify that uid_loop is large enough and that
2869 we can invert P. */
2870 if (invert_jump (p, new_label, 1))
2872 rtx q, r;
2874 /* If no suitable BARRIER was found, create a suitable
2875 one before TARGET. Since TARGET is a fall-through
2876 path, we'll need to insert a jump around our block
2877 and add a BARRIER before TARGET.
2879 This creates an extra unconditional jump outside
2880 the loop. However, the benefits of removing rarely
2881 executed instructions from inside the loop usually
2882 outweigh the cost of the extra unconditional jump
2883 outside the loop. */
2884 if (loc == 0)
2886 rtx temp;
2888 temp = gen_jump (JUMP_LABEL (insn));
2889 temp = emit_jump_insn_before (temp, target);
2890 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2891 LABEL_NUSES (JUMP_LABEL (insn))++;
2892 loc = emit_barrier_before (target);
2895 /* Include the BARRIER after INSN and copy the
2896 block after LOC. */
2897 if (squeeze_notes (&new_label, &last_insn_to_move))
2898 abort ();
2899 reorder_insns (new_label, last_insn_to_move, loc);
2901 /* All those insns are now in TARGET_LOOP. */
2902 for (q = new_label;
2903 q != NEXT_INSN (last_insn_to_move);
2904 q = NEXT_INSN (q))
2905 uid_loop[INSN_UID (q)] = target_loop;
2907 /* The label jumped to by INSN is no longer a loop
2908 exit. Unless INSN does not have a label (e.g.,
2909 it is a RETURN insn), search loop->exit_labels
2910 to find its label_ref, and remove it. Also turn
2911 off LABEL_OUTSIDE_LOOP_P bit. */
2912 if (JUMP_LABEL (insn))
2914 for (q = 0, r = this_loop->exit_labels;
2916 q = r, r = LABEL_NEXTREF (r))
2917 if (XEXP (r, 0) == JUMP_LABEL (insn))
2919 LABEL_OUTSIDE_LOOP_P (r) = 0;
2920 if (q)
2921 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2922 else
2923 this_loop->exit_labels = LABEL_NEXTREF (r);
2924 break;
2927 for (loop = this_loop; loop && loop != target_loop;
2928 loop = loop->outer)
2929 loop->exit_count--;
2931 /* If we didn't find it, then something is
2932 wrong. */
2933 if (! r)
2934 abort ();
2937 /* P is now a jump outside the loop, so it must be put
2938 in loop->exit_labels, and marked as such.
2939 The easiest way to do this is to just call
2940 mark_loop_jump again for P. */
2941 mark_loop_jump (PATTERN (p), this_loop);
2943 /* If INSN now jumps to the insn after it,
2944 delete INSN. */
2945 if (JUMP_LABEL (insn) != 0
2946 && (next_real_insn (JUMP_LABEL (insn))
2947 == next_real_insn (insn)))
2948 delete_related_insns (insn);
2951 /* Continue the loop after where the conditional
2952 branch used to jump, since the only branch insn
2953 in the block (if it still remains) is an inter-loop
2954 branch and hence needs no processing. */
2955 insn = NEXT_INSN (cond_label);
2957 if (--LABEL_NUSES (cond_label) == 0)
2958 delete_related_insns (cond_label);
2960 /* This loop will be continued with NEXT_INSN (insn). */
2961 insn = PREV_INSN (insn);
2968 /* If any label in X jumps to a loop different from LOOP and any of the
2969 loops it is contained in, mark the target loop invalid.
2971 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2973 static void
2974 mark_loop_jump (x, loop)
2975 rtx x;
2976 struct loop *loop;
2978 struct loop *dest_loop;
2979 struct loop *outer_loop;
2980 int i;
2982 switch (GET_CODE (x))
2984 case PC:
2985 case USE:
2986 case CLOBBER:
2987 case REG:
2988 case MEM:
2989 case CONST_INT:
2990 case CONST_DOUBLE:
2991 case RETURN:
2992 return;
2994 case CONST:
2995 /* There could be a label reference in here. */
2996 mark_loop_jump (XEXP (x, 0), loop);
2997 return;
2999 case PLUS:
3000 case MINUS:
3001 case MULT:
3002 mark_loop_jump (XEXP (x, 0), loop);
3003 mark_loop_jump (XEXP (x, 1), loop);
3004 return;
3006 case LO_SUM:
3007 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3008 mark_loop_jump (XEXP (x, 1), loop);
3009 return;
3011 case SIGN_EXTEND:
3012 case ZERO_EXTEND:
3013 mark_loop_jump (XEXP (x, 0), loop);
3014 return;
3016 case LABEL_REF:
3017 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3019 /* Link together all labels that branch outside the loop. This
3020 is used by final_[bg]iv_value and the loop unrolling code. Also
3021 mark this LABEL_REF so we know that this branch should predict
3022 false. */
3024 /* A check to make sure the label is not in an inner nested loop,
3025 since this does not count as a loop exit. */
3026 if (dest_loop)
3028 for (outer_loop = dest_loop; outer_loop;
3029 outer_loop = outer_loop->outer)
3030 if (outer_loop == loop)
3031 break;
3033 else
3034 outer_loop = NULL;
3036 if (loop && ! outer_loop)
3038 LABEL_OUTSIDE_LOOP_P (x) = 1;
3039 LABEL_NEXTREF (x) = loop->exit_labels;
3040 loop->exit_labels = x;
3042 for (outer_loop = loop;
3043 outer_loop && outer_loop != dest_loop;
3044 outer_loop = outer_loop->outer)
3045 outer_loop->exit_count++;
3048 /* If this is inside a loop, but not in the current loop or one enclosed
3049 by it, it invalidates at least one loop. */
3051 if (! dest_loop)
3052 return;
3054 /* We must invalidate every nested loop containing the target of this
3055 label, except those that also contain the jump insn. */
3057 for (; dest_loop; dest_loop = dest_loop->outer)
3059 /* Stop when we reach a loop that also contains the jump insn. */
3060 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3061 if (dest_loop == outer_loop)
3062 return;
3064 /* If we get here, we know we need to invalidate a loop. */
3065 if (loop_dump_stream && ! dest_loop->invalid)
3066 fprintf (loop_dump_stream,
3067 "\nLoop at %d ignored due to multiple entry points.\n",
3068 INSN_UID (dest_loop->start));
3070 dest_loop->invalid = 1;
3072 return;
3074 case SET:
3075 /* If this is not setting pc, ignore. */
3076 if (SET_DEST (x) == pc_rtx)
3077 mark_loop_jump (SET_SRC (x), loop);
3078 return;
3080 case IF_THEN_ELSE:
3081 mark_loop_jump (XEXP (x, 1), loop);
3082 mark_loop_jump (XEXP (x, 2), loop);
3083 return;
3085 case PARALLEL:
3086 case ADDR_VEC:
3087 for (i = 0; i < XVECLEN (x, 0); i++)
3088 mark_loop_jump (XVECEXP (x, 0, i), loop);
3089 return;
3091 case ADDR_DIFF_VEC:
3092 for (i = 0; i < XVECLEN (x, 1); i++)
3093 mark_loop_jump (XVECEXP (x, 1, i), loop);
3094 return;
3096 default:
3097 /* Strictly speaking this is not a jump into the loop, only a possible
3098 jump out of the loop. However, we have no way to link the destination
3099 of this jump onto the list of exit labels. To be safe we mark this
3100 loop and any containing loops as invalid. */
3101 if (loop)
3103 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3105 if (loop_dump_stream && ! outer_loop->invalid)
3106 fprintf (loop_dump_stream,
3107 "\nLoop at %d ignored due to unknown exit jump.\n",
3108 INSN_UID (outer_loop->start));
3109 outer_loop->invalid = 1;
3112 return;
3116 /* Return nonzero if there is a label in the range from
3117 insn INSN to and including the insn whose luid is END.
3118 INSN must have an assigned luid (i.e., it must not have
3119 been previously created by loop.c). */
3121 static int
3122 labels_in_range_p (insn, end)
3123 rtx insn;
3124 int end;
3126 while (insn && INSN_LUID (insn) <= end)
3128 if (GET_CODE (insn) == CODE_LABEL)
3129 return 1;
3130 insn = NEXT_INSN (insn);
3133 return 0;
3136 /* Record that a memory reference X is being set. */
3138 static void
3139 note_addr_stored (x, y, data)
3140 rtx x;
3141 rtx y ATTRIBUTE_UNUSED;
3142 void *data ATTRIBUTE_UNUSED;
3144 struct loop_info *loop_info = data;
3146 if (x == 0 || GET_CODE (x) != MEM)
3147 return;
3149 /* Count number of memory writes.
3150 This affects heuristics in strength_reduce. */
3151 loop_info->num_mem_sets++;
3153 /* BLKmode MEM means all memory is clobbered. */
3154 if (GET_MODE (x) == BLKmode)
3156 if (RTX_UNCHANGING_P (x))
3157 loop_info->unknown_constant_address_altered = 1;
3158 else
3159 loop_info->unknown_address_altered = 1;
3161 return;
3164 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3165 loop_info->store_mems);
3168 /* X is a value modified by an INSN that references a biv inside a loop
3169 exit test (i.e., X is somehow related to the value of the biv). If X
3170 is a pseudo that is used more than once, then the biv is (effectively)
3171 used more than once. DATA is a pointer to a loop_regs structure. */
3173 static void
3174 note_set_pseudo_multiple_uses (x, y, data)
3175 rtx x;
3176 rtx y ATTRIBUTE_UNUSED;
3177 void *data;
3179 struct loop_regs *regs = (struct loop_regs *) data;
3181 if (x == 0)
3182 return;
3184 while (GET_CODE (x) == STRICT_LOW_PART
3185 || GET_CODE (x) == SIGN_EXTRACT
3186 || GET_CODE (x) == ZERO_EXTRACT
3187 || GET_CODE (x) == SUBREG)
3188 x = XEXP (x, 0);
3190 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3191 return;
3193 /* If we do not have usage information, or if we know the register
3194 is used more than once, note that fact for check_dbra_loop. */
3195 if (REGNO (x) >= max_reg_before_loop
3196 || ! regs->array[REGNO (x)].single_usage
3197 || regs->array[REGNO (x)].single_usage == const0_rtx)
3198 regs->multiple_uses = 1;
3201 /* Return nonzero if the rtx X is invariant over the current loop.
3203 The value is 2 if we refer to something only conditionally invariant.
3205 A memory ref is invariant if it is not volatile and does not conflict
3206 with anything stored in `loop_info->store_mems'. */
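/* The "conditionally invariant" value 2 arises, for example, from a
   register whose set_in_loop count is negative below, i.e. a register
   that is invariant only if the movable candidate that sets it is in
   fact moved out of the loop.  */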
3209 loop_invariant_p (loop, x)
3210 const struct loop *loop;
3211 rtx x;
3213 struct loop_info *loop_info = LOOP_INFO (loop);
3214 struct loop_regs *regs = LOOP_REGS (loop);
3215 int i;
3216 enum rtx_code code;
3217 const char *fmt;
3218 int conditional = 0;
3219 rtx mem_list_entry;
3221 if (x == 0)
3222 return 1;
3223 code = GET_CODE (x);
3224 switch (code)
3226 case CONST_INT:
3227 case CONST_DOUBLE:
3228 case SYMBOL_REF:
3229 case CONST:
3230 return 1;
3232 case LABEL_REF:
3233 /* A LABEL_REF is normally invariant, however, if we are unrolling
3234 loops, and this label is inside the loop, then it isn't invariant.
3235 This is because each unrolled copy of the loop body will have
3236 a copy of this label. If this was invariant, then an insn loading
3237 the address of this label into a register might get moved outside
3238 the loop, and then each loop body would end up using the same label.
3240 We don't know the loop bounds here though, so just fail for all
3241 labels. */
3242 if (flag_unroll_loops)
3243 return 0;
3244 else
3245 return 1;
3247 case PC:
3248 case CC0:
3249 case UNSPEC_VOLATILE:
3250 return 0;
3252 case REG:
3253 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3254 since the reg might be set by initialization within the loop. */
3256 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3257 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3258 && ! current_function_has_nonlocal_goto)
3259 return 1;
3261 if (LOOP_INFO (loop)->has_call
3262 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3263 return 0;
3265 if (regs->array[REGNO (x)].set_in_loop < 0)
3266 return 2;
3268 return regs->array[REGNO (x)].set_in_loop == 0;
3270 case MEM:
3271 /* Volatile memory references must be rejected. Do this before
3272 checking for read-only items, so that volatile read-only items
3273 will be rejected also. */
3274 if (MEM_VOLATILE_P (x))
3275 return 0;
3277 /* See if there is any dependence between a store and this load. */
3278 mem_list_entry = loop_info->store_mems;
3279 while (mem_list_entry)
3281 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3282 x, rtx_varies_p))
3283 return 0;
3285 mem_list_entry = XEXP (mem_list_entry, 1);
3288 /* It's not invalidated by a store in memory
3289 but we must still verify the address is invariant. */
3290 break;
3292 case ASM_OPERANDS:
3293 /* Don't mess with insns declared volatile. */
3294 if (MEM_VOLATILE_P (x))
3295 return 0;
3296 break;
3298 default:
3299 break;
3302 fmt = GET_RTX_FORMAT (code);
3303 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3305 if (fmt[i] == 'e')
3307 int tem = loop_invariant_p (loop, XEXP (x, i));
3308 if (tem == 0)
3309 return 0;
3310 if (tem == 2)
3311 conditional = 1;
3313 else if (fmt[i] == 'E')
3315 int j;
3316 for (j = 0; j < XVECLEN (x, i); j++)
3318 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3319 if (tem == 0)
3320 return 0;
3321 if (tem == 2)
3322 conditional = 1;
3328 return 1 + conditional;
3331 /* Return nonzero if all the insns in the loop that set REG
3332 are INSN and the immediately following insns,
3333 and if each of those insns sets REG in an invariant way
3334 (not counting uses of REG in them).
3336 The value is 2 if some of these insns are only conditionally invariant.
3338 We assume that INSN itself is the first set of REG
3339 and that its source is invariant. */
3341 static int
3342 consec_sets_invariant_p (loop, reg, n_sets, insn)
3343 const struct loop *loop;
3344 int n_sets;
3345 rtx reg, insn;
3347 struct loop_regs *regs = LOOP_REGS (loop);
3348 rtx p = insn;
3349 unsigned int regno = REGNO (reg);
3350 rtx temp;
3351 /* Number of sets we have to insist on finding after INSN. */
3352 int count = n_sets - 1;
3353 int old = regs->array[regno].set_in_loop;
3354 int value = 0;
3355 int this;
3357 /* If N_SETS hit the limit, we can't rely on its value. */
3358 if (n_sets == 127)
3359 return 0;
3361 regs->array[regno].set_in_loop = 0;
3363 while (count > 0)
3365 enum rtx_code code;
3366 rtx set;
3368 p = NEXT_INSN (p);
3369 code = GET_CODE (p);
3371 /* If library call, skip to end of it. */
3372 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3373 p = XEXP (temp, 0);
3375 this = 0;
3376 if (code == INSN
3377 && (set = single_set (p))
3378 && GET_CODE (SET_DEST (set)) == REG
3379 && REGNO (SET_DEST (set)) == regno)
3381 this = loop_invariant_p (loop, SET_SRC (set));
3382 if (this != 0)
3383 value |= this;
3384 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3386 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3387 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3388 notes are OK. */
3389 this = (CONSTANT_P (XEXP (temp, 0))
3390 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3391 && loop_invariant_p (loop, XEXP (temp, 0))));
3392 if (this != 0)
3393 value |= this;
3396 if (this != 0)
3397 count--;
3398 else if (code != NOTE)
3400 regs->array[regno].set_in_loop = old;
3401 return 0;
3405 regs->array[regno].set_in_loop = old;
3406 /* If loop_invariant_p ever returned 2, we return 2. */
3407 return 1 + (value & 2);
3410 #if 0
3411 /* I don't think this condition is sufficient to allow INSN
3412 to be moved, so we no longer test it. */
3414 /* Return 1 if all insns in the basic block of INSN and following INSN
3415 that set REG are invariant according to TABLE. */
3417 static int
3418 all_sets_invariant_p (reg, insn, table)
3419 rtx reg, insn;
3420 short *table;
3422 rtx p = insn;
3423 int regno = REGNO (reg);
3425 while (1)
3427 enum rtx_code code;
3428 p = NEXT_INSN (p);
3429 code = GET_CODE (p);
3430 if (code == CODE_LABEL || code == JUMP_INSN)
3431 return 1;
3432 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3433 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3434 && REGNO (SET_DEST (PATTERN (p))) == regno)
3436 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3437 return 0;
3441 #endif /* 0 */
3443 /* Look at all uses (not sets) of registers in X. For each, if it is
3444 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3445 a different insn, set USAGE[REGNO] to const0_rtx. */
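/* In other words, once the whole loop body has been scanned,
   single_usage is 0 for a register that is never used, the unique
   using insn for a register used in exactly one insn, and const0_rtx
   for a register used in more than one insn.  */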
3447 static void
3448 find_single_use_in_loop (regs, insn, x)
3449 struct loop_regs *regs;
3450 rtx insn;
3451 rtx x;
3453 enum rtx_code code = GET_CODE (x);
3454 const char *fmt = GET_RTX_FORMAT (code);
3455 int i, j;
3457 if (code == REG)
3458 regs->array[REGNO (x)].single_usage
3459 = (regs->array[REGNO (x)].single_usage != 0
3460 && regs->array[REGNO (x)].single_usage != insn)
3461 ? const0_rtx : insn;
3463 else if (code == SET)
3465 /* Don't count SET_DEST if it is a REG; otherwise count things
3466 in SET_DEST because if a register is partially modified, it won't
3467 show up as a potential movable so we don't care how USAGE is set
3468 for it. */
3469 if (GET_CODE (SET_DEST (x)) != REG)
3470 find_single_use_in_loop (regs, insn, SET_DEST (x));
3471 find_single_use_in_loop (regs, insn, SET_SRC (x));
3473 else
3474 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3476 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3477 find_single_use_in_loop (regs, insn, XEXP (x, i));
3478 else if (fmt[i] == 'E')
3479 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3480 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3484 /* Count and record any set in X which is contained in INSN. Update
3485 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3486 in X. */
3488 static void
3489 count_one_set (regs, insn, x, last_set)
3490 struct loop_regs *regs;
3491 rtx insn, x;
3492 rtx *last_set;
3494 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3495 /* Don't move a reg that has an explicit clobber.
3496 It's not worth the pain to try to do it correctly. */
3497 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3499 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3501 rtx dest = SET_DEST (x);
3502 while (GET_CODE (dest) == SUBREG
3503 || GET_CODE (dest) == ZERO_EXTRACT
3504 || GET_CODE (dest) == SIGN_EXTRACT
3505 || GET_CODE (dest) == STRICT_LOW_PART)
3506 dest = XEXP (dest, 0);
3507 if (GET_CODE (dest) == REG)
3509 int i;
3510 int regno = REGNO (dest);
3511 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3513 /* If this is the first setting of this reg
3514 in current basic block, and it was set before,
3515 it must be set in two basic blocks, so it cannot
3516 be moved out of the loop. */
3517 if (regs->array[regno].set_in_loop > 0
3518 && last_set == 0)
3519 regs->array[regno+i].may_not_optimize = 1;
3520 /* If this is not first setting in current basic block,
3521 see if reg was used in between previous one and this.
3522 If so, neither one can be moved. */
3523 if (last_set[regno] != 0
3524 && reg_used_between_p (dest, last_set[regno], insn))
3525 regs->array[regno+i].may_not_optimize = 1;
3526 if (regs->array[regno+i].set_in_loop < 127)
3527 ++regs->array[regno+i].set_in_loop;
3528 last_set[regno+i] = insn;
3534 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3535 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3536 contained in insn INSN is used by any insn that precedes INSN in
3537 cyclic order starting from the loop entry point.
3539 We don't want to use INSN_LUID here because if we restrict INSN to those
3540 that have a valid INSN_LUID, it means we cannot move an invariant out
3541 from an inner loop past two loops. */
3543 static int
3544 loop_reg_used_before_p (loop, set, insn)
3545 const struct loop *loop;
3546 rtx set, insn;
3548 rtx reg = SET_DEST (set);
3549 rtx p;
3551 /* Scan forward checking for register usage. If we hit INSN, we
3552 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3553 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3555 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3556 return 1;
3558 if (p == loop->end)
3559 p = loop->start;
3562 return 0;
3566 /* Information we collect about arrays that we might want to prefetch. */
3567 struct prefetch_info
3569 struct iv_class *class; /* Class this prefetch is based on. */
3570 struct induction *giv; /* GIV this prefetch is based on. */
3571 rtx base_address; /* Start prefetching from this address plus
3572 index. */
3573 HOST_WIDE_INT index;
3574 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3575 iteration. */
3576 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3577 prefetch area in one iteration. */
3578 unsigned int total_bytes; /* Total bytes loop will access in this block.
3579 This is set only for loops with known
3580 iteration counts and is 0xffffffff
3581 otherwise. */
3582 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3583 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3584 unsigned int write : 1; /* 1 for read/write prefetches. */
3587 /* Data used by check_store function. */
3588 struct check_store_data
3590 rtx mem_address;
3591 int mem_write;
3594 static void check_store PARAMS ((rtx, rtx, void *));
3595 static void emit_prefetch_instructions PARAMS ((struct loop *));
3596 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3598 /* Set mem_write when mem_address is found. Used as callback to
3599 note_stores. */
3600 static void
3601 check_store (x, pat, data)
3602 rtx x, pat ATTRIBUTE_UNUSED;
3603 void *data;
3605 struct check_store_data *d = (struct check_store_data *) data;
3607 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3608 d->mem_write = 1;
3611 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3612 important to get some addresses combined. Later more sophisticated
3613 transformations can be added when necessary.
3615 ??? The same trick of swapping operands is done at several other places.
3616 It would be nice to develop some common way to handle this. */
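/* For instance, (plus (reg 100) (const_int 4)) and
   (plus (const_int 4) (reg 100)) are considered equal here because
   PLUS is commutative, whereas rtx_equal_p would treat them as
   different.  */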
3618 static int
3619 rtx_equal_for_prefetch_p (x, y)
3620 rtx x, y;
3622 int i;
3623 int j;
3624 enum rtx_code code = GET_CODE (x);
3625 const char *fmt;
3627 if (x == y)
3628 return 1;
3629 if (code != GET_CODE (y))
3630 return 0;
3632 code = GET_CODE (x);
3634 if (GET_RTX_CLASS (code) == 'c')
3636 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3637 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3638 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3639 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3641 /* Compare the elements. If any pair of corresponding elements fails to
3642 match, return 0 for the whole thing. */
3644 fmt = GET_RTX_FORMAT (code);
3645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3647 switch (fmt[i])
3649 case 'w':
3650 if (XWINT (x, i) != XWINT (y, i))
3651 return 0;
3652 break;
3654 case 'i':
3655 if (XINT (x, i) != XINT (y, i))
3656 return 0;
3657 break;
3659 case 'E':
3660 /* Two vectors must have the same length. */
3661 if (XVECLEN (x, i) != XVECLEN (y, i))
3662 return 0;
3664 /* And the corresponding elements must match. */
3665 for (j = 0; j < XVECLEN (x, i); j++)
3666 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3667 XVECEXP (y, i, j)) == 0)
3668 return 0;
3669 break;
3671 case 'e':
3672 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3673 return 0;
3674 break;
3676 case 's':
3677 if (strcmp (XSTR (x, i), XSTR (y, i)))
3678 return 0;
3679 break;
3681 case 'u':
3682 /* These are just backpointers, so they don't matter. */
3683 break;
3685 case '0':
3686 break;
3688 /* It is believed that rtx's at this level will never
3689 contain anything but integers and other rtx's,
3690 except for within LABEL_REFs and SYMBOL_REFs. */
3691 default:
3692 abort ();
3695 return 1;
3698 /* Remove constant addition value from the expression X (when present)
3699 and return it. */
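/* For instance (illustrative only): given (plus (reg 100) (const_int 8)),
   this returns 8 and leaves (reg 100) in *X; given
   (const (plus (symbol_ref "x") (const_int 12))), it returns 12 and leaves
   (symbol_ref "x") in *X without modifying the shared CONST itself.  */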
3701 static HOST_WIDE_INT
3702 remove_constant_addition (x)
3703 rtx *x;
3705 HOST_WIDE_INT addval = 0;
3706 rtx exp = *x;
3708 /* Avoid clobbering a shared CONST expression. */
3709 if (GET_CODE (exp) == CONST)
3711 if (GET_CODE (XEXP (exp, 0)) == PLUS
3712 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3713 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3715 *x = XEXP (XEXP (exp, 0), 0);
3716 return INTVAL (XEXP (XEXP (exp, 0), 1));
3718 return 0;
3721 if (GET_CODE (exp) == CONST_INT)
3723 addval = INTVAL (exp);
3724 *x = const0_rtx;
3727 /* For plus expression recurse on ourself. */
3728 else if (GET_CODE (exp) == PLUS)
3730 addval += remove_constant_addition (&XEXP (exp, 0));
3731 addval += remove_constant_addition (&XEXP (exp, 1));
3733      /* If either operand was a constant (it has now been replaced by zero),
3734         remove the extra zero from the expression.  */
3735 if (XEXP (exp, 0) == const0_rtx)
3736 *x = XEXP (exp, 1);
3737 else if (XEXP (exp, 1) == const0_rtx)
3738 *x = XEXP (exp, 0);
3741 return addval;
3744 /* Attempt to identify accesses to arrays that are most likely to cause cache
3745 misses, and emit prefetch instructions a few prefetch blocks forward.
3747 To detect the arrays we use the GIV information that was collected by the
3748 strength reduction pass.
3750 The prefetch instructions are generated after the GIV information is done
3751 and before the strength reduction process. The new GIVs are injected into
3752 the strength reduction tables, so the prefetch addresses are optimized as
3753 well.
3755 GIVs are split into base address, stride, and constant addition values.
3756 GIVs with the same address, stride and close addition values are combined
3757    into a single prefetch.  Writes to GIVs are also detected, so that prefetch
3758    for write instructions can be used for the blocks we write to, on machines
3759    that support write prefetches.
3761 Several heuristics are used to determine when to prefetch. They are
3762 controlled by defined symbols that can be overridden for each target. */
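/* A rough, illustrative example of the decomposition described above: in

       for (i = 0; i < n; i++)
         a[i] = b[i] + b[i + 2];

   the addresses of b[i] and b[i + 2] share the same base (b) and stride
   (sizeof (*b)) and differ only by a small constant, so they are merged into
   a single prefetch stream, while a[i] forms a separate stream that is
   marked as written and can use a write prefetch where available.  */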
3764 static void
3765 emit_prefetch_instructions (loop)
3766 struct loop *loop;
3768 int num_prefetches = 0;
3769 int num_real_prefetches = 0;
3770 int num_real_write_prefetches = 0;
3771 int num_prefetches_before = 0;
3772 int num_write_prefetches_before = 0;
3773 int ahead = 0;
3774 int i;
3775 struct iv_class *bl;
3776 struct induction *iv;
3777 struct prefetch_info info[MAX_PREFETCHES];
3778 struct loop_ivs *ivs = LOOP_IVS (loop);
3780 if (!HAVE_prefetch)
3781 return;
3783   /* Consider only loops w/o calls.  When a call is made, the loop is probably
3784      slow enough that the memory reads would not benefit from prefetching.  */
3785 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3787 if (loop_dump_stream)
3788 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3790 return;
3793 /* Don't prefetch in loops known to have few iterations. */
3794 if (PREFETCH_NO_LOW_LOOPCNT
3795 && LOOP_INFO (loop)->n_iterations
3796 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3798 if (loop_dump_stream)
3799 fprintf (loop_dump_stream,
3800 "Prefetch: ignoring loop: not enough iterations.\n");
3801 return;
3804 /* Search all induction variables and pick those interesting for the prefetch
3805 machinery. */
3806 for (bl = ivs->list; bl; bl = bl->next)
3808 struct induction *biv = bl->biv, *biv1;
3809 int basestride = 0;
3811 biv1 = biv;
3813 /* Expect all BIVs to be executed in each iteration. This makes our
3814 analysis more conservative. */
3815 while (biv1)
3817 /* Discard non-constant additions that we can't handle well yet, and
3818 BIVs that are executed multiple times; such BIVs ought to be
3819 handled in the nested loop. We accept not_every_iteration BIVs,
3820 since these only result in larger strides and make our
3821 heuristics more conservative. */
3822 if (GET_CODE (biv->add_val) != CONST_INT)
3824 if (loop_dump_stream)
3826 fprintf (loop_dump_stream,
3827 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3828 REGNO (biv->src_reg), INSN_UID (biv->insn));
3829 print_rtl (loop_dump_stream, biv->add_val);
3830 fprintf (loop_dump_stream, "\n");
3832 break;
3835 if (biv->maybe_multiple)
3837 if (loop_dump_stream)
3839 fprintf (loop_dump_stream,
3840 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3841 REGNO (biv->src_reg), INSN_UID (biv->insn));
3842 print_rtl (loop_dump_stream, biv->add_val);
3843 fprintf (loop_dump_stream, "\n");
3845 break;
3848 basestride += INTVAL (biv1->add_val);
3849 biv1 = biv1->next_iv;
3852 if (biv1 || !basestride)
3853 continue;
3855 for (iv = bl->giv; iv; iv = iv->next_iv)
3857 rtx address;
3858 rtx temp;
3859 HOST_WIDE_INT index = 0;
3860 int add = 1;
3861 HOST_WIDE_INT stride = 0;
3862 int stride_sign = 1;
3863 struct check_store_data d;
3864 const char *ignore_reason = NULL;
3865 int size = GET_MODE_SIZE (GET_MODE (iv));
3867 /* See whether an induction variable is interesting to us and if
3868 not, report the reason. */
3869 if (iv->giv_type != DEST_ADDR)
3870 ignore_reason = "giv is not a destination address";
3872 /* We are interested only in constant stride memory references
3873 in order to be able to compute density easily. */
3874 else if (GET_CODE (iv->mult_val) != CONST_INT)
3875 ignore_reason = "stride is not constant";
3877 else
3879 stride = INTVAL (iv->mult_val) * basestride;
3880 if (stride < 0)
3882 stride = -stride;
3883 stride_sign = -1;
3886 /* On some targets, reversed order prefetches are not
3887 worthwhile. */
3888 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3889 ignore_reason = "reversed order stride";
3891 /* Prefetch of accesses with an extreme stride might not be
3892 worthwhile, either. */
3893 else if (PREFETCH_NO_EXTREME_STRIDE
3894 && stride > PREFETCH_EXTREME_STRIDE)
3895 ignore_reason = "extreme stride";
3897 /* Ignore GIVs with varying add values; we can't predict the
3898 value for the next iteration. */
3899 else if (!loop_invariant_p (loop, iv->add_val))
3900 ignore_reason = "giv has varying add value";
3902 /* Ignore GIVs in the nested loops; they ought to have been
3903 handled already. */
3904 else if (iv->maybe_multiple)
3905 ignore_reason = "giv is in nested loop";
3908 if (ignore_reason != NULL)
3910 if (loop_dump_stream)
3911 fprintf (loop_dump_stream,
3912 "Prefetch: ignoring giv at %d: %s.\n",
3913 INSN_UID (iv->insn), ignore_reason);
3914 continue;
3917 /* Determine the pointer to the basic array we are examining. It is
3918 the sum of the BIV's initial value and the GIV's add_val. */
3919 address = copy_rtx (iv->add_val);
3920 temp = copy_rtx (bl->initial_value);
3922 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3923 index = remove_constant_addition (&address);
3925 d.mem_write = 0;
3926 d.mem_address = *iv->location;
3928 /* When the GIV is not always executed, we might be better off by
3929 not dirtying the cache pages. */
3930 if (PREFETCH_CONDITIONAL || iv->always_executed)
3931 note_stores (PATTERN (iv->insn), check_store, &d);
3932 else
3934 if (loop_dump_stream)
3935 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3936 INSN_UID (iv->insn), "in conditional code.");
3937 continue;
3940 /* Attempt to find another prefetch to the same array and see if we
3941 can merge this one. */
3942 for (i = 0; i < num_prefetches; i++)
3943 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3944 && stride == info[i].stride)
3946            /* In case both access the same array (the same location,
3947               just with a small difference in constant indexes), merge
3948               the prefetches.  Just do the later one and the earlier one will
3949               get prefetched from the previous iteration.
3950               The artificial threshold should not be too small,
3951               but also not bigger than the small portion of memory usually
3952               traversed by a single loop. */
3953 if (index >= info[i].index
3954 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3956 info[i].write |= d.mem_write;
3957 info[i].bytes_accessed += size;
3958 info[i].index = index;
3959 info[i].giv = iv;
3960 info[i].class = bl;
3961 info[num_prefetches].base_address = address;
3962 add = 0;
3963 break;
3966 if (index < info[i].index
3967 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3969 info[i].write |= d.mem_write;
3970 info[i].bytes_accessed += size;
3971 add = 0;
3972 break;
3976 /* Merging failed. */
3977 if (add)
3979 info[num_prefetches].giv = iv;
3980 info[num_prefetches].class = bl;
3981 info[num_prefetches].index = index;
3982 info[num_prefetches].stride = stride;
3983 info[num_prefetches].base_address = address;
3984 info[num_prefetches].write = d.mem_write;
3985 info[num_prefetches].bytes_accessed = size;
3986 num_prefetches++;
3987 if (num_prefetches >= MAX_PREFETCHES)
3989 if (loop_dump_stream)
3990 fprintf (loop_dump_stream,
3991 "Maximal number of prefetches exceeded.\n");
3992 return;
3998 for (i = 0; i < num_prefetches; i++)
4000 int density;
4002 /* Attempt to calculate the total number of bytes fetched by all
4003 iterations of the loop. Avoid overflow. */
4004 if (LOOP_INFO (loop)->n_iterations
4005 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4006 >= LOOP_INFO (loop)->n_iterations))
4007 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4008 else
4009 info[i].total_bytes = 0xffffffff;
4011 density = info[i].bytes_accessed * 100 / info[i].stride;
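      /* DENSITY is thus a percentage, while PREFETCH_DENSE_MEM below is a
	 fraction of 256.  As an illustrative example, a giv that touches 4
	 bytes out of every 32-byte stride gives density 4 * 100 / 32 = 12,
	 i.e. only about 12% of each prefetched block would actually be
	 used.  */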
4013 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4014 if (PREFETCH_ONLY_DENSE_MEM)
4015 if (density * 256 > PREFETCH_DENSE_MEM * 100
4016 && (info[i].total_bytes / PREFETCH_BLOCK
4017 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4019 info[i].prefetch_before_loop = 1;
4020 info[i].prefetch_in_loop
4021 = (info[i].total_bytes / PREFETCH_BLOCK
4022 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4024 else
4026 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4027 if (loop_dump_stream)
4028 fprintf (loop_dump_stream,
4029 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4030 INSN_UID (info[i].giv->insn), density);
4032 else
4033 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4035 /* Find how many prefetch instructions we'll use within the loop. */
4036 if (info[i].prefetch_in_loop != 0)
4038 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4039 / PREFETCH_BLOCK);
4040 num_real_prefetches += info[i].prefetch_in_loop;
4041 if (info[i].write)
4042 num_real_write_prefetches += info[i].prefetch_in_loop;
4046 /* Determine how many iterations ahead to prefetch within the loop, based
4047 on how many prefetches we currently expect to do within the loop. */
4048 if (num_real_prefetches != 0)
4050 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4052 if (loop_dump_stream)
4053 fprintf (loop_dump_stream,
4054 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4055 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4056 num_real_prefetches = 0, num_real_write_prefetches = 0;
4059 /* We'll also use AHEAD to determine how many prefetch instructions to
4060 emit before a loop, so don't leave it zero. */
4061 if (ahead == 0)
4062 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
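  /* Illustrative numbers only: with SIMULTANEOUS_PREFETCHES of 3 and two
     prefetch insns needed per iteration, AHEAD becomes 3 / 2 = 1; with four
     prefetch insns per iteration it would be 0, so the in-loop prefetches
     are dropped and AHEAD falls back to PREFETCH_BLOCKS_BEFORE_LOOP_MAX for
     the before-loop prefetches.  */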
4064 for (i = 0; i < num_prefetches; i++)
4066 /* Update if we've decided not to prefetch anything within the loop. */
4067 if (num_real_prefetches == 0)
4068 info[i].prefetch_in_loop = 0;
4070 /* Find how many prefetch instructions we'll use before the loop. */
4071 if (info[i].prefetch_before_loop != 0)
4073 int n = info[i].total_bytes / PREFETCH_BLOCK;
4074 if (n > ahead)
4075 n = ahead;
4076 info[i].prefetch_before_loop = n;
4077 num_prefetches_before += n;
4078 if (info[i].write)
4079 num_write_prefetches_before += n;
4082 if (loop_dump_stream)
4084 if (info[i].prefetch_in_loop == 0
4085 && info[i].prefetch_before_loop == 0)
4086 continue;
4087 fprintf (loop_dump_stream, "Prefetch insn: %d",
4088 INSN_UID (info[i].giv->insn));
4089 fprintf (loop_dump_stream,
4090 "; in loop: %d; before: %d; %s\n",
4091 info[i].prefetch_in_loop,
4092 info[i].prefetch_before_loop,
4093 info[i].write ? "read/write" : "read only");
4094 fprintf (loop_dump_stream,
4095 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4096 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4097 info[i].bytes_accessed, info[i].total_bytes);
4098 fprintf (loop_dump_stream, " index: ");
4099 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4100 fprintf (loop_dump_stream, "; stride: ");
4101 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4102 fprintf (loop_dump_stream, "; address: ");
4103 print_rtl (loop_dump_stream, info[i].base_address);
4104 fprintf (loop_dump_stream, "\n");
4108 if (num_real_prefetches + num_prefetches_before > 0)
4110 /* Record that this loop uses prefetch instructions. */
4111 LOOP_INFO (loop)->has_prefetch = 1;
4113 if (loop_dump_stream)
4115 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4116 num_real_prefetches, num_real_write_prefetches);
4117 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4118 num_prefetches_before, num_write_prefetches_before);
4122 for (i = 0; i < num_prefetches; i++)
4124 int y;
4126 for (y = 0; y < info[i].prefetch_in_loop; y++)
4128 rtx loc = copy_rtx (*info[i].giv->location);
4129 rtx insn;
4130 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4131 rtx before_insn = info[i].giv->insn;
4132 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4133 rtx seq;
4135 /* We can save some effort by offsetting the address on
4136 architectures with offsettable memory references. */
4137 if (offsettable_address_p (0, VOIDmode, loc))
4138 loc = plus_constant (loc, bytes_ahead);
4139 else
4141 rtx reg = gen_reg_rtx (Pmode);
4142 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4143 GEN_INT (bytes_ahead), reg,
4144 0, before_insn);
4145 loc = reg;
4148 start_sequence ();
4149 /* Make sure the address operand is valid for prefetch. */
4150 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4151 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4152 loc = force_reg (Pmode, loc);
4153 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4154 GEN_INT (3)));
4155 seq = get_insns ();
4156 end_sequence ();
4157 emit_insn_before (seq, before_insn);
4159 /* Check all insns emitted and record the new GIV
4160 information. */
4161 insn = NEXT_INSN (prev_insn);
4162 while (insn != before_insn)
4164 insn = check_insn_for_givs (loop, insn,
4165 info[i].giv->always_executed,
4166 info[i].giv->maybe_multiple);
4167 insn = NEXT_INSN (insn);
4171 if (PREFETCH_BEFORE_LOOP)
4173 /* Emit insns before the loop to fetch the first cache lines or,
4174 if we're not prefetching within the loop, everything we expect
4175 to need. */
4176 for (y = 0; y < info[i].prefetch_before_loop; y++)
4178 rtx reg = gen_reg_rtx (Pmode);
4179 rtx loop_start = loop->start;
4180 rtx init_val = info[i].class->initial_value;
4181 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4182 info[i].giv->add_val,
4183 GEN_INT (y * PREFETCH_BLOCK));
4185              /* Functions called by LOOP_IV_ADD_MULT_EMIT_BEFORE expect a
4186                 non-constant INIT_VAL to have the same mode as REG, which
4187 in this case we know to be Pmode. */
4188 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4189 init_val = convert_to_mode (Pmode, init_val, 0);
4190 loop_iv_add_mult_emit_before (loop, init_val,
4191 info[i].giv->mult_val,
4192 add_val, reg, 0, loop_start);
4193 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4194 GEN_INT (3)),
4195 loop_start);
4200 return;
4203 /* A "basic induction variable" or biv is a pseudo reg that is set
4204 (within this loop) only by incrementing or decrementing it. */
4205 /* A "general induction variable" or giv is a pseudo reg whose
4206 value is a linear function of a biv. */
4208 /* Bivs are recognized by `basic_induction_var';
4209 Givs by `general_induction_var'. */
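/* An illustrative example of the distinction: in

       for (i = 0; i < n; i++)
         x[i] = 0;

   the counter I, incremented by a constant each iteration, is a biv, while
   the address of x[i], i.e. x + 4 * i for a 4-byte element, is a giv since
   it is a linear function of the biv.  */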
4211 /* Communication with routines called via `note_stores'. */
4213 static rtx note_insn;
4215 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4217 static rtx addr_placeholder;
4219 /* ??? Unfinished optimizations, and possible future optimizations,
4220 for the strength reduction code. */
4222 /* ??? The interaction of biv elimination, and recognition of 'constant'
4223 bivs, may cause problems. */
4225 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4226 performance problems.
4228 Perhaps don't eliminate things that can be combined with an addressing
4229 mode. Find all givs that have the same biv, mult_val, and add_val;
4230 then for each giv, check to see if its only use dies in a following
4231 memory address. If so, generate a new memory address and check to see
4232 if it is valid. If it is valid, then store the modified memory address,
4233 otherwise, mark the giv as not done so that it will get its own iv. */
4235 /* ??? Could try to optimize branches when it is known that a biv is always
4236 positive. */
4238 /* ??? When replacing a biv in a compare insn, we should replace it with the
4239    closest giv so that an optimized branch can still be recognized by the
4240    combiner, e.g. the VAX acb insn.  */
4242 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4243 was rerun in loop_optimize whenever a register was added or moved.
4244 Also, some of the optimizations could be a little less conservative. */
4246 /* Scan the loop body and call FNCALL for each insn.  In addition to the
4247    LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4248    the callback.
4250    NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
4251    for every loop iteration except for the last one.
4253    MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4254    every loop iteration.  */
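/* A minimal sketch of a conforming callback (hypothetical; the real callbacks
   used in this file are check_insn_for_bivs and check_insn_for_givs):

       static rtx
       example_callback (loop, insn, not_every_iteration, maybe_multiple)
            struct loop *loop;
            rtx insn;
            int not_every_iteration;
            int maybe_multiple;
       {
         ... examine INSN here ...
         return insn;
       }

   The insn returned by the callback is the one from which the walk
   continues.  */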
4256 void
4257 for_each_insn_in_loop (loop, fncall)
4258 struct loop *loop;
4259 loop_insn_callback fncall;
4261 /* This is 1 if current insn is not executed at least once for every loop
4262 iteration. */
4263 int not_every_iteration = 0;
4264 int maybe_multiple = 0;
4265 int past_loop_latch = 0;
4266 int loop_depth = 0;
4267 rtx p;
4269 /* If loop_scan_start points to the loop exit test, we have to be wary of
4270 subversive use of gotos inside expression statements. */
4271 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4272 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4274 /* Scan through loop to find all possible bivs. */
4276 for (p = next_insn_in_loop (loop, loop->scan_start);
4277 p != NULL_RTX;
4278 p = next_insn_in_loop (loop, p))
4280 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4282 /* Past CODE_LABEL, we get to insns that may be executed multiple
4283 times. The only way we can be sure that they can't is if every
4284 jump insn between here and the end of the loop either
4285 returns, exits the loop, is a jump to a location that is still
4286 behind the label, or is a jump to the loop start. */
4288 if (GET_CODE (p) == CODE_LABEL)
4290 rtx insn = p;
4292 maybe_multiple = 0;
4294 while (1)
4296 insn = NEXT_INSN (insn);
4297 if (insn == loop->scan_start)
4298 break;
4299 if (insn == loop->end)
4301 if (loop->top != 0)
4302 insn = loop->top;
4303 else
4304 break;
4305 if (insn == loop->scan_start)
4306 break;
4309 if (GET_CODE (insn) == JUMP_INSN
4310 && GET_CODE (PATTERN (insn)) != RETURN
4311 && (!any_condjump_p (insn)
4312 || (JUMP_LABEL (insn) != 0
4313 && JUMP_LABEL (insn) != loop->scan_start
4314 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4316 maybe_multiple = 1;
4317 break;
4322 /* Past a jump, we get to insns for which we can't count
4323 on whether they will be executed during each iteration. */
4324 /* This code appears twice in strength_reduce. There is also similar
4325 code in scan_loop. */
4326 if (GET_CODE (p) == JUMP_INSN
4327 /* If we enter the loop in the middle, and scan around to the
4328 beginning, don't set not_every_iteration for that.
4329 This can be any kind of jump, since we want to know if insns
4330 will be executed if the loop is executed. */
4331 && !(JUMP_LABEL (p) == loop->top
4332 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4333 && any_uncondjump_p (p))
4334 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4336 rtx label = 0;
4338 /* If this is a jump outside the loop, then it also doesn't
4339 matter. Check to see if the target of this branch is on the
4340 loop->exits_labels list. */
4342 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4343 if (XEXP (label, 0) == JUMP_LABEL (p))
4344 break;
4346 if (!label)
4347 not_every_iteration = 1;
4350 else if (GET_CODE (p) == NOTE)
4352 /* At the virtual top of a converted loop, insns are again known to
4353 be executed each iteration: logically, the loop begins here
4354 even though the exit code has been duplicated.
4356 Insns are also again known to be executed each iteration at
4357 the LOOP_CONT note. */
4358 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4359 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4360 && loop_depth == 0)
4361 not_every_iteration = 0;
4362 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4363 loop_depth++;
4364 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4365 loop_depth--;
4368 /* Note if we pass a loop latch. If we do, then we can not clear
4369 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4370 a loop since a jump before the last CODE_LABEL may have started
4371 a new loop iteration.
4373 Note that LOOP_TOP is only set for rotated loops and we need
4374 this check for all loops, so compare against the CODE_LABEL
4375 which immediately follows LOOP_START. */
4376 if (GET_CODE (p) == JUMP_INSN
4377 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4378 past_loop_latch = 1;
4380 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4381 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4382 or not an insn is known to be executed each iteration of the
4383 loop, whether or not any iterations are known to occur.
4385 Therefore, if we have just passed a label and have no more labels
4386 between here and the test insn of the loop, and we have not passed
4387 a jump to the top of the loop, then we know these insns will be
4388 executed each iteration. */
4390 if (not_every_iteration
4391 && !past_loop_latch
4392 && GET_CODE (p) == CODE_LABEL
4393 && no_labels_between_p (p, loop->end)
4394 && loop_insn_first_p (p, loop->cont))
4395 not_every_iteration = 0;
4399 static void
4400 loop_bivs_find (loop)
4401 struct loop *loop;
4403 struct loop_regs *regs = LOOP_REGS (loop);
4404 struct loop_ivs *ivs = LOOP_IVS (loop);
4405 /* Temporary list pointers for traversing ivs->list. */
4406 struct iv_class *bl, **backbl;
4408 ivs->list = 0;
4410 for_each_insn_in_loop (loop, check_insn_for_bivs);
4412 /* Scan ivs->list to remove all regs that proved not to be bivs.
4413 Make a sanity check against regs->n_times_set. */
4414 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4416 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4417 /* Above happens if register modified by subreg, etc. */
4418 /* Make sure it is not recognized as a basic induction var: */
4419 || regs->array[bl->regno].n_times_set != bl->biv_count
4420 /* If never incremented, it is invariant that we decided not to
4421 move. So leave it alone. */
4422 || ! bl->incremented)
4424 if (loop_dump_stream)
4425 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4426 bl->regno,
4427 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4428 ? "not induction variable"
4429 : (! bl->incremented ? "never incremented"
4430 : "count error")));
4432 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4433 *backbl = bl->next;
4435 else
4437 backbl = &bl->next;
4439 if (loop_dump_stream)
4440 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4446 /* Determine how BIVs are initialized by looking through the pre-header
4447    extended basic block.  */
4448 static void
4449 loop_bivs_init_find (loop)
4450 struct loop *loop;
4452 struct loop_ivs *ivs = LOOP_IVS (loop);
4453 /* Temporary list pointers for traversing ivs->list. */
4454 struct iv_class *bl;
4455 int call_seen;
4456 rtx p;
4458 /* Find initial value for each biv by searching backwards from loop_start,
4459 halting at first label. Also record any test condition. */
4461 call_seen = 0;
4462 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4464 rtx test;
4466 note_insn = p;
4468 if (GET_CODE (p) == CALL_INSN)
4469 call_seen = 1;
4471 if (INSN_P (p))
4472 note_stores (PATTERN (p), record_initial, ivs);
4474 /* Record any test of a biv that branches around the loop if no store
4475 between it and the start of loop. We only care about tests with
4476 constants and registers and only certain of those. */
4477 if (GET_CODE (p) == JUMP_INSN
4478 && JUMP_LABEL (p) != 0
4479 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4480 && (test = get_condition_for_loop (loop, p)) != 0
4481 && GET_CODE (XEXP (test, 0)) == REG
4482 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4483 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4484 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4485 && bl->init_insn == 0)
4487 /* If an NE test, we have an initial value! */
4488 if (GET_CODE (test) == NE)
4490 bl->init_insn = p;
4491 bl->init_set = gen_rtx_SET (VOIDmode,
4492 XEXP (test, 0), XEXP (test, 1));
4494 else
4495 bl->initial_test = test;
4501 /* Look at each biv and see if we can say anything better about its
4502    initial value from any initializing insns set up above.  (This is done
4503    in two passes to avoid missing SETs in a PARALLEL.)  */
4504 static void
4505 loop_bivs_check (loop)
4506 struct loop *loop;
4508 struct loop_ivs *ivs = LOOP_IVS (loop);
4509 /* Temporary list pointers for traversing ivs->list. */
4510 struct iv_class *bl;
4511 struct iv_class **backbl;
4513 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4515 rtx src;
4516 rtx note;
4518 if (! bl->init_insn)
4519 continue;
4521 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4522 is a constant, use the value of that. */
4523 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4524 && CONSTANT_P (XEXP (note, 0)))
4525 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4526 && CONSTANT_P (XEXP (note, 0))))
4527 src = XEXP (note, 0);
4528 else
4529 src = SET_SRC (bl->init_set);
4531 if (loop_dump_stream)
4532 fprintf (loop_dump_stream,
4533 "Biv %d: initialized at insn %d: initial value ",
4534 bl->regno, INSN_UID (bl->init_insn));
4536 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4537 || GET_MODE (src) == VOIDmode)
4538 && valid_initial_value_p (src, bl->init_insn,
4539 LOOP_INFO (loop)->pre_header_has_call,
4540 loop->start))
4542 bl->initial_value = src;
4544 if (loop_dump_stream)
4546 print_simple_rtl (loop_dump_stream, src);
4547 fputc ('\n', loop_dump_stream);
4550      /* If we can't make it a giv,
4551         let the biv keep its initial value of "itself".  */
4552 else if (loop_dump_stream)
4553 fprintf (loop_dump_stream, "is complex\n");
4558 /* Search the loop for general induction variables. */
4560 static void
4561 loop_givs_find (loop)
4562 struct loop* loop;
4564 for_each_insn_in_loop (loop, check_insn_for_givs);
4568 /* For each giv for which we still don't know whether or not it is
4569 replaceable, check to see if it is replaceable because its final value
4570 can be calculated. */
4572 static void
4573 loop_givs_check (loop)
4574 struct loop *loop;
4576 struct loop_ivs *ivs = LOOP_IVS (loop);
4577 struct iv_class *bl;
4579 for (bl = ivs->list; bl; bl = bl->next)
4581 struct induction *v;
4583 for (v = bl->giv; v; v = v->next_iv)
4584 if (! v->replaceable && ! v->not_replaceable)
4585 check_final_value (loop, v);
4590 /* Return non-zero if it is possible to eliminate the biv BL provided
4591 all givs are reduced. This is possible if either the reg is not
4592 used outside the loop, or we can compute what its final value will
4593 be. */
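/* Illustrative example: if the only remaining use of a counter I is an exit
   test such as I < 100, and a reduced giv P advances from P0 in steps of 4
   in lock step with I, the test can be rewritten in terms of P (comparing
   against P0 + 400) and I eliminated, provided I is not live after the loop
   or its final value can be stored explicitly.  */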
4595 static int
4596 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4597 struct loop *loop;
4598 struct iv_class *bl;
4599 int threshold;
4600 int insn_count;
4602 /* For architectures with a decrement_and_branch_until_zero insn,
4603 don't do this if we put a REG_NONNEG note on the endtest for this
4604 biv. */
4606 #ifdef HAVE_decrement_and_branch_until_zero
4607 if (bl->nonneg)
4609 if (loop_dump_stream)
4610 fprintf (loop_dump_stream,
4611 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4612 return 0;
4614 #endif
4616 /* Check that biv is used outside loop or if it has a final value.
4617 Compare against bl->init_insn rather than loop->start. We aren't
4618 concerned with any uses of the biv between init_insn and
4619 loop->start since these won't be affected by the value of the biv
4620 elsewhere in the function, so long as init_insn doesn't use the
4621 biv itself. */
4623 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4624 && bl->init_insn
4625 && INSN_UID (bl->init_insn) < max_uid_for_loop
4626 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4627 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4628 || (bl->final_value = final_biv_value (loop, bl)))
4629 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4631 if (loop_dump_stream)
4633 fprintf (loop_dump_stream,
4634 "Cannot eliminate biv %d.\n",
4635 bl->regno);
4636 fprintf (loop_dump_stream,
4637 "First use: insn %d, last use: insn %d.\n",
4638 REGNO_FIRST_UID (bl->regno),
4639 REGNO_LAST_UID (bl->regno));
4641 return 0;
4645 /* Reduce each giv of BL that we have decided to reduce. */
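/* In classic strength-reduction terms (illustrative): for a giv whose value
   is 4 * i + base, "reducing" it means allocating a new register initialized
   to 4 * i0 + base before the loop and adding 4 to it at every point where
   the biv I is incremented, so the multiplication disappears from the loop
   body.  */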
4647 static void
4648 loop_givs_reduce (loop, bl)
4649 struct loop *loop;
4650 struct iv_class *bl;
4652 struct induction *v;
4654 for (v = bl->giv; v; v = v->next_iv)
4656 struct induction *tv;
4657 if (! v->ignore && v->same == 0)
4659 int auto_inc_opt = 0;
4661 /* If the code for derived givs immediately below has already
4662 allocated a new_reg, we must keep it. */
4663 if (! v->new_reg)
4664 v->new_reg = gen_reg_rtx (v->mode);
4666 #ifdef AUTO_INC_DEC
4667 /* If the target has auto-increment addressing modes, and
4668 this is an address giv, then try to put the increment
4669 immediately after its use, so that flow can create an
4670 auto-increment addressing mode. */
4671 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4672 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4673 /* We don't handle reversed biv's because bl->biv->insn
4674 does not have a valid INSN_LUID. */
4675 && ! bl->reversed
4676 && v->always_executed && ! v->maybe_multiple
4677 && INSN_UID (v->insn) < max_uid_for_loop)
4679 /* If other giv's have been combined with this one, then
4680 this will work only if all uses of the other giv's occur
4681 before this giv's insn. This is difficult to check.
4683 We simplify this by looking for the common case where
4684 there is one DEST_REG giv, and this giv's insn is the
4685 last use of the dest_reg of that DEST_REG giv. If the
4686 increment occurs after the address giv, then we can
4687 perform the optimization. (Otherwise, the increment
4688 would have to go before other_giv, and we would not be
4689 able to combine it with the address giv to get an
4690 auto-inc address.) */
4691 if (v->combined_with)
4693 struct induction *other_giv = 0;
4695 for (tv = bl->giv; tv; tv = tv->next_iv)
4696 if (tv->same == v)
4698 if (other_giv)
4699 break;
4700 else
4701 other_giv = tv;
4703 if (! tv && other_giv
4704 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4705 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4706 == INSN_UID (v->insn))
4707 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4708 auto_inc_opt = 1;
4710 /* Check for case where increment is before the address
4711 giv. Do this test in "loop order". */
4712 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4713 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4714 || (INSN_LUID (bl->biv->insn)
4715 > INSN_LUID (loop->scan_start))))
4716 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4717 && (INSN_LUID (loop->scan_start)
4718 < INSN_LUID (bl->biv->insn))))
4719 auto_inc_opt = -1;
4720 else
4721 auto_inc_opt = 1;
4723 #ifdef HAVE_cc0
4725 rtx prev;
4727 /* We can't put an insn immediately after one setting
4728 cc0, or immediately before one using cc0. */
4729 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4730 || (auto_inc_opt == -1
4731 && (prev = prev_nonnote_insn (v->insn)) != 0
4732 && INSN_P (prev)
4733 && sets_cc0_p (PATTERN (prev))))
4734 auto_inc_opt = 0;
4736 #endif
4738 if (auto_inc_opt)
4739 v->auto_inc_opt = 1;
4741 #endif
4743 /* For each place where the biv is incremented, add an insn
4744 to increment the new, reduced reg for the giv. */
4745 for (tv = bl->biv; tv; tv = tv->next_iv)
4747 rtx insert_before;
4749 if (! auto_inc_opt)
4750 insert_before = tv->insn;
4751 else if (auto_inc_opt == 1)
4752 insert_before = NEXT_INSN (v->insn);
4753 else
4754 insert_before = v->insn;
4756 if (tv->mult_val == const1_rtx)
4757 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4758 v->new_reg, v->new_reg,
4759 0, insert_before);
4760 else /* tv->mult_val == const0_rtx */
4761 /* A multiply is acceptable here
4762 since this is presumed to be seldom executed. */
4763 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4764 v->add_val, v->new_reg,
4765 0, insert_before);
4768 /* Add code at loop start to initialize giv's reduced reg. */
4770 loop_iv_add_mult_hoist (loop,
4771 extend_value_for_giv (v, bl->initial_value),
4772 v->mult_val, v->add_val, v->new_reg);
4778 /* Check for givs whose first use is their definition and whose
4779 last use is the definition of another giv. If so, it is likely
4780 dead and should not be used to derive another giv nor to
4781 eliminate a biv. */
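/* For example (illustrative): a temporary T set only by T = I * 4 and then
   used only in Q = T + BASE fits this pattern; once Q is reduced, T is
   likely dead, so it would be a poor choice for deriving further givs or
   for replacing the biv in the exit test.  */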
4783 static void
4784 loop_givs_dead_check (loop, bl)
4785 struct loop *loop ATTRIBUTE_UNUSED;
4786 struct iv_class *bl;
4788 struct induction *v;
4790 for (v = bl->giv; v; v = v->next_iv)
4792 if (v->ignore
4793 || (v->same && v->same->ignore))
4794 continue;
4796 if (v->giv_type == DEST_REG
4797 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4799 struct induction *v1;
4801 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4802 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4803 v->maybe_dead = 1;
4809 static void
4810 loop_givs_rescan (loop, bl, reg_map)
4811 struct loop *loop;
4812 struct iv_class *bl;
4813 rtx *reg_map;
4815 struct induction *v;
4817 for (v = bl->giv; v; v = v->next_iv)
4819 if (v->same && v->same->ignore)
4820 v->ignore = 1;
4822 if (v->ignore)
4823 continue;
4825 /* Update expression if this was combined, in case other giv was
4826 replaced. */
4827 if (v->same)
4828 v->new_reg = replace_rtx (v->new_reg,
4829 v->same->dest_reg, v->same->new_reg);
4831 /* See if this register is known to be a pointer to something. If
4832 so, see if we can find the alignment. First see if there is a
4833 destination register that is a pointer. If so, this shares the
4834 alignment too. Next see if we can deduce anything from the
4835 computational information. If not, and this is a DEST_ADDR
4836 giv, at least we know that it's a pointer, though we don't know
4837 the alignment. */
4838 if (GET_CODE (v->new_reg) == REG
4839 && v->giv_type == DEST_REG
4840 && REG_POINTER (v->dest_reg))
4841 mark_reg_pointer (v->new_reg,
4842 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4843 else if (GET_CODE (v->new_reg) == REG
4844 && REG_POINTER (v->src_reg))
4846 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4848 if (align == 0
4849 || GET_CODE (v->add_val) != CONST_INT
4850 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4851 align = 0;
4853 mark_reg_pointer (v->new_reg, align);
4855 else if (GET_CODE (v->new_reg) == REG
4856 && GET_CODE (v->add_val) == REG
4857 && REG_POINTER (v->add_val))
4859 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4861 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4862 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4863 align = 0;
4865 mark_reg_pointer (v->new_reg, align);
4867 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4868 mark_reg_pointer (v->new_reg, 0);
4870 if (v->giv_type == DEST_ADDR)
4871 /* Store reduced reg as the address in the memref where we found
4872 this giv. */
4873 validate_change (v->insn, v->location, v->new_reg, 0);
4874 else if (v->replaceable)
4876 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4878 else
4880 /* Not replaceable; emit an insn to set the original giv reg from
4881 the reduced giv, same as above. */
4882 loop_insn_emit_after (loop, 0, v->insn,
4883 gen_move_insn (v->dest_reg, v->new_reg));
4886 /* When a loop is reversed, givs which depend on the reversed
4887 biv, and which are live outside the loop, must be set to their
4888 correct final value. This insn is only needed if the giv is
4889 not replaceable. The correct final value is the same as the
4890 value that the giv starts the reversed loop with. */
4891 if (bl->reversed && ! v->replaceable)
4892 loop_iv_add_mult_sink (loop,
4893 extend_value_for_giv (v, bl->initial_value),
4894 v->mult_val, v->add_val, v->dest_reg);
4895 else if (v->final_value)
4896 loop_insn_sink_or_swim (loop,
4897 gen_load_of_final_value (v->dest_reg,
4898 v->final_value));
4900 if (loop_dump_stream)
4902 fprintf (loop_dump_stream, "giv at %d reduced to ",
4903 INSN_UID (v->insn));
4904 print_simple_rtl (loop_dump_stream, v->new_reg);
4905 fprintf (loop_dump_stream, "\n");
4911 static int
4912 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4913 struct loop *loop ATTRIBUTE_UNUSED;
4914 struct iv_class *bl;
4915 struct induction *v;
4916 rtx test_reg;
4918 int add_cost;
4919 int benefit;
4921 benefit = v->benefit;
4922 PUT_MODE (test_reg, v->mode);
4923 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4924 test_reg, test_reg);
4926 /* Reduce benefit if not replaceable, since we will insert a
4927 move-insn to replace the insn that calculates this giv. Don't do
4928 this unless the giv is a user variable, since it will often be
4929 marked non-replaceable because of the duplication of the exit
4930 code outside the loop. In such a case, the copies we insert are
4931 dead and will be deleted. So they don't have a cost. Similar
4932 situations exist. */
4933 /* ??? The new final_[bg]iv_value code does a much better job of
4934 finding replaceable giv's, and hence this code may no longer be
4935 necessary. */
4936 if (! v->replaceable && ! bl->eliminable
4937 && REG_USERVAR_P (v->dest_reg))
4938 benefit -= copy_cost;
4940 /* Decrease the benefit to count the add-insns that we will insert
4941 to increment the reduced reg for the giv. ??? This can
4942 overestimate the run-time cost of the additional insns, e.g. if
4943 there are multiple basic blocks that increment the biv, but only
4944 one of these blocks is executed during each iteration. There is
4945 no good way to detect cases like this with the current structure
4946 of the loop optimizer. This code is more accurate for
4947 determining code size than run-time benefits. */
4948 benefit -= add_cost * bl->biv_count;
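  /* Purely illustrative numbers: a giv with an initial benefit of 10, one
     biv increment (biv_count == 1) and an add_cost of 4 ends up with a net
     benefit of 10 - 4 = 6 at this point; whether that wins is decided later
     in strength_reduce by comparing v->lifetime * threshold * benefit
     against the loop's insn count.  */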
4950 /* Decide whether to strength-reduce this giv or to leave the code
4951 unchanged (recompute it from the biv each time it is used). This
4952 decision can be made independently for each giv. */
4954 #ifdef AUTO_INC_DEC
4955 /* Attempt to guess whether autoincrement will handle some of the
4956 new add insns; if so, increase BENEFIT (undo the subtraction of
4957 add_cost that was done above). */
4958 if (v->giv_type == DEST_ADDR
4959 /* Increasing the benefit is risky, since this is only a guess.
4960 Avoid increasing register pressure in cases where there would
4961 be no other benefit from reducing this giv. */
4962 && benefit > 0
4963 && GET_CODE (v->mult_val) == CONST_INT)
4965 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4967 if (HAVE_POST_INCREMENT
4968 && INTVAL (v->mult_val) == size)
4969 benefit += add_cost * bl->biv_count;
4970 else if (HAVE_PRE_INCREMENT
4971 && INTVAL (v->mult_val) == size)
4972 benefit += add_cost * bl->biv_count;
4973 else if (HAVE_POST_DECREMENT
4974 && -INTVAL (v->mult_val) == size)
4975 benefit += add_cost * bl->biv_count;
4976 else if (HAVE_PRE_DECREMENT
4977 && -INTVAL (v->mult_val) == size)
4978 benefit += add_cost * bl->biv_count;
4980 #endif
4982 return benefit;
4986 /* Free IV structures for LOOP. */
4988 static void
4989 loop_ivs_free (loop)
4990 struct loop *loop;
4992 struct loop_ivs *ivs = LOOP_IVS (loop);
4993 struct iv_class *iv = ivs->list;
4995 free (ivs->regs);
4997 while (iv)
4999 struct iv_class *next = iv->next;
5000 struct induction *induction;
5001 struct induction *next_induction;
5003 for (induction = iv->biv; induction; induction = next_induction)
5005 next_induction = induction->next_iv;
5006 free (induction);
5008 for (induction = iv->giv; induction; induction = next_induction)
5010 next_induction = induction->next_iv;
5011 free (induction);
5014 free (iv);
5015 iv = next;
5020 /* Perform strength reduction and induction variable elimination.
5022 Pseudo registers created during this function will be beyond the
5023 last valid index in several tables including
5024 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5025 problem here, because the added registers cannot be givs outside of
5026 their loop, and hence will never be reconsidered. But scan_loop
5027 must check regnos to make sure they are in bounds. */
5029 static void
5030 strength_reduce (loop, flags)
5031 struct loop *loop;
5032 int flags;
5034 struct loop_info *loop_info = LOOP_INFO (loop);
5035 struct loop_regs *regs = LOOP_REGS (loop);
5036 struct loop_ivs *ivs = LOOP_IVS (loop);
5037 rtx p;
5038 /* Temporary list pointer for traversing ivs->list. */
5039 struct iv_class *bl;
5040 /* Ratio of extra register life span we can justify
5041 for saving an instruction. More if loop doesn't call subroutines
5042 since in that case saving an insn makes more difference
5043 and more registers are available. */
5044 /* ??? could set this to last value of threshold in move_movables */
5045 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5046 /* Map of pseudo-register replacements. */
5047 rtx *reg_map = NULL;
5048 int reg_map_size;
5049 int unrolled_insn_copies = 0;
5050 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5051 int insn_count = count_insns_in_loop (loop);
5053 addr_placeholder = gen_reg_rtx (Pmode);
5055 ivs->n_regs = max_reg_before_loop;
5056 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5058 /* Find all BIVs in loop. */
5059 loop_bivs_find (loop);
5061 /* Exit if there are no bivs. */
5062 if (! ivs->list)
5064 /* Can still unroll the loop anyways, but indicate that there is no
5065 strength reduction info available. */
5066 if (flags & LOOP_UNROLL)
5067 unroll_loop (loop, insn_count, 0);
5069 loop_ivs_free (loop);
5070 return;
5073   /* Determine how BIVs are initialized by looking through the pre-header
5074      extended basic block.  */
5075 loop_bivs_init_find (loop);
5077   /* Look at each biv and see if we can say anything better about its
5078      initial value from any initializing insns set up above.  */
5079 loop_bivs_check (loop);
5081 /* Search the loop for general induction variables. */
5082 loop_givs_find (loop);
5084 /* Try to calculate and save the number of loop iterations. This is
5085 set to zero if the actual number can not be calculated. This must
5086 be called after all giv's have been identified, since otherwise it may
5087 fail if the iteration variable is a giv. */
5088 loop_iterations (loop);
5090 #ifdef HAVE_prefetch
5091 if (flags & LOOP_PREFETCH)
5092 emit_prefetch_instructions (loop);
5093 #endif
5095 /* Now for each giv for which we still don't know whether or not it is
5096 replaceable, check to see if it is replaceable because its final value
5097 can be calculated. This must be done after loop_iterations is called,
5098 so that final_giv_value will work correctly. */
5099 loop_givs_check (loop);
5101 /* Try to prove that the loop counter variable (if any) is always
5102 nonnegative; if so, record that fact with a REG_NONNEG note
5103 so that "decrement and branch until zero" insn can be used. */
5104 check_dbra_loop (loop, insn_count);
5106 /* Create reg_map to hold substitutions for replaceable giv regs.
5107 Some givs might have been made from biv increments, so look at
5108 ivs->reg_iv_type for a suitable size. */
5109 reg_map_size = ivs->n_regs;
5110 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5112 /* Examine each iv class for feasibility of strength reduction/induction
5113 variable elimination. */
5115 for (bl = ivs->list; bl; bl = bl->next)
5117 struct induction *v;
5118 int benefit;
5120 /* Test whether it will be possible to eliminate this biv
5121 provided all givs are reduced. */
5122 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5124 /* This will be true at the end, if all givs which depend on this
5125 biv have been strength reduced.
5126 We can't (currently) eliminate the biv unless this is so. */
5127 bl->all_reduced = 1;
5129 /* Check each extension dependent giv in this class to see if its
5130 root biv is safe from wrapping in the interior mode. */
5131 check_ext_dependent_givs (bl, loop_info);
5133 /* Combine all giv's for this iv_class. */
5134 combine_givs (regs, bl);
5136 for (v = bl->giv; v; v = v->next_iv)
5138 struct induction *tv;
5140 if (v->ignore || v->same)
5141 continue;
5143 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5145 /* If an insn is not to be strength reduced, then set its ignore
5146 flag, and clear bl->all_reduced. */
5148 /* A giv that depends on a reversed biv must be reduced if it is
5149 used after the loop exit, otherwise, it would have the wrong
5150 value after the loop exit. To make it simple, just reduce all
5151 of such giv's whether or not we know they are used after the loop
5152 exit. */
5154 if (! flag_reduce_all_givs
5155 && v->lifetime * threshold * benefit < insn_count
5156 && ! bl->reversed)
5158 if (loop_dump_stream)
5159 fprintf (loop_dump_stream,
5160 "giv of insn %d not worth while, %d vs %d.\n",
5161 INSN_UID (v->insn),
5162 v->lifetime * threshold * benefit, insn_count);
5163 v->ignore = 1;
5164 bl->all_reduced = 0;
5166 else
5168 /* Check that we can increment the reduced giv without a
5169 multiply insn. If not, reject it. */
5171 for (tv = bl->biv; tv; tv = tv->next_iv)
5172 if (tv->mult_val == const1_rtx
5173 && ! product_cheap_p (tv->add_val, v->mult_val))
5175 if (loop_dump_stream)
5176 fprintf (loop_dump_stream,
5177 "giv of insn %d: would need a multiply.\n",
5178 INSN_UID (v->insn));
5179 v->ignore = 1;
5180 bl->all_reduced = 0;
5181 break;
5186 /* Check for givs whose first use is their definition and whose
5187 last use is the definition of another giv. If so, it is likely
5188 dead and should not be used to derive another giv nor to
5189 eliminate a biv. */
5190 loop_givs_dead_check (loop, bl);
5192 /* Reduce each giv that we decided to reduce. */
5193 loop_givs_reduce (loop, bl);
5195 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5196 as not reduced.
5198 For each giv register that can be reduced now: if replaceable,
5199 substitute reduced reg wherever the old giv occurs;
5200 else add new move insn "giv_reg = reduced_reg". */
5201 loop_givs_rescan (loop, bl, reg_map);
5203 /* All the givs based on the biv bl have been reduced if they
5204 merit it. */
5206 /* For each giv not marked as maybe dead that has been combined with a
5207 second giv, clear any "maybe dead" mark on that second giv.
5208 v->new_reg will either be or refer to the register of the giv it
5209 combined with.
5211 Doing this clearing avoids problems in biv elimination where
5212 a giv's new_reg is a complex value that can't be put in the
5213 insn but the giv combined with (with a reg as new_reg) is
5214 marked maybe_dead. Since the register will be used in either
5215 case, we'd prefer it be used from the simpler giv. */
5217 for (v = bl->giv; v; v = v->next_iv)
5218 if (! v->maybe_dead && v->same)
5219 v->same->maybe_dead = 0;
5221 /* Try to eliminate the biv, if it is a candidate.
5222 This won't work if ! bl->all_reduced,
5223 since the givs we planned to use might not have been reduced.
5225 We have to be careful that we didn't initially think we could
5226 eliminate this biv because of a giv that we now think may be
5227 dead and shouldn't be used as a biv replacement.
5229 Also, there is the possibility that we may have a giv that looks
5230 like it can be used to eliminate a biv, but the resulting insn
5231 isn't valid. This can happen, for example, on the 88k, where a
5232 JUMP_INSN can compare a register only with zero. Attempts to
5233 replace it with a compare with a constant will fail.
5235 Note that in cases where this call fails, we may have replaced some
5236 of the occurrences of the biv with a giv, but no harm was done in
5237 doing so in the rare cases where it can occur. */
5239 if (bl->all_reduced == 1 && bl->eliminable
5240 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5242 /* ?? If we created a new test to bypass the loop entirely,
5243 or otherwise drop straight in, based on this test, then
5244 we might want to rewrite it also. This way some later
5245 pass has more hope of removing the initialization of this
5246 biv entirely. */
5248 /* If final_value != 0, then the biv may be used after loop end
5249 and we must emit an insn to set it just in case.
5251 Reversed bivs already have an insn after the loop setting their
5252 value, so we don't need another one. We can't calculate the
5253 proper final value for such a biv here anyways. */
5254 if (bl->final_value && ! bl->reversed)
5255 loop_insn_sink_or_swim (loop,
5256 gen_load_of_final_value (bl->biv->dest_reg,
5257 bl->final_value));
5259 if (loop_dump_stream)
5260 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5261 bl->regno);
5263 /* See above note wrt final_value. But since we couldn't eliminate
5264 the biv, we must set the value after the loop instead of before. */
5265 else if (bl->final_value && ! bl->reversed)
5266 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5267 bl->final_value));
5270 /* Go through all the instructions in the loop, making all the
5271 register substitutions scheduled in REG_MAP. */
5273 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5274 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5275 || GET_CODE (p) == CALL_INSN)
5277 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5278 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5279 INSN_CODE (p) = -1;
5282 if (loop_info->n_iterations > 0)
5284 /* When we completely unroll a loop we will likely not need the increment
5285 of the loop BIV and we will not need the conditional branch at the
5286 end of the loop. */
5287 unrolled_insn_copies = insn_count - 2;
5289 #ifdef HAVE_cc0
5290 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5291 need the comparison before the conditional branch at the end of the
5292 loop. */
5293 unrolled_insn_copies -= 1;
5294 #endif
5296 /* We'll need one copy for each loop iteration. */
5297 unrolled_insn_copies *= loop_info->n_iterations;
5299 /* A little slop to account for the ability to remove initialization
5300 code, better CSE, and other secondary benefits of completely
5301 unrolling some loops. */
5302 unrolled_insn_copies -= 1;
5304 /* Clamp the value. */
5305 if (unrolled_insn_copies < 0)
5306 unrolled_insn_copies = 0;
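      /* Rough illustration of the estimate above: a 10-insn loop known to
	 iterate twice yields (10 - 2) * 2 - 1 = 15 copies (ignoring the
	 HAVE_cc0 adjustment), which is larger than the original 10 insns,
	 so it would not be unrolled by the size test below; with a single
	 iteration the estimate drops to 7 and unrolling wins.  */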
5309 /* Unroll loops from within strength reduction so that we can use the
5310 induction variable information that strength_reduce has already
5311 collected. Always unroll loops that would be as small or smaller
5312 unrolled than when rolled. */
5313 if ((flags & LOOP_UNROLL)
5314 || (!(flags & LOOP_FIRST_PASS)
5315 && loop_info->n_iterations > 0
5316 && unrolled_insn_copies <= insn_count))
5317 unroll_loop (loop, insn_count, 1);
5319 #ifdef HAVE_doloop_end
5320 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5321 doloop_optimize (loop);
5322 #endif /* HAVE_doloop_end */
5324 /* In case number of iterations is known, drop branch prediction note
5325 in the branch. Do that only in second loop pass, as loop unrolling
5326 may change the number of iterations performed. */
5327 if (flags & LOOP_BCT)
5329 unsigned HOST_WIDE_INT n
5330 = loop_info->n_iterations / loop_info->unroll_number;
5331 if (n > 1)
5332 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5333 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5336 if (loop_dump_stream)
5337 fprintf (loop_dump_stream, "\n");
5339 loop_ivs_free (loop);
5340 if (reg_map)
5341 free (reg_map);
5344 /* Record all basic induction variables calculated in the insn.  */
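/* Typical biv-forming insns (illustrative): i = i + 1 or i = i - 4, i.e. the
   register is only ever changed by adding a loop-invariant amount to
   itself.  */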
5345 static rtx
5346 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5347 struct loop *loop;
5348 rtx p;
5349 int not_every_iteration;
5350 int maybe_multiple;
5352 struct loop_ivs *ivs = LOOP_IVS (loop);
5353 rtx set;
5354 rtx dest_reg;
5355 rtx inc_val;
5356 rtx mult_val;
5357 rtx *location;
5359 if (GET_CODE (p) == INSN
5360 && (set = single_set (p))
5361 && GET_CODE (SET_DEST (set)) == REG)
5363 dest_reg = SET_DEST (set);
5364 if (REGNO (dest_reg) < max_reg_before_loop
5365 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5366 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5368 if (basic_induction_var (loop, SET_SRC (set),
5369 GET_MODE (SET_SRC (set)),
5370 dest_reg, p, &inc_val, &mult_val,
5371 &location))
5373 /* It is a possible basic induction variable.
5374 Create and initialize an induction structure for it. */
5376 struct induction *v
5377 = (struct induction *) xmalloc (sizeof (struct induction));
5379 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5380 not_every_iteration, maybe_multiple);
5381 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5383 else if (REGNO (dest_reg) < ivs->n_regs)
5384 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5387 return p;
5390 /* Record all givs calculated in the insn.
5391 A register is a giv if: it is only set once, it is a function of a
5392 biv and a constant (or invariant), and it is not a biv. */
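/* For instance (illustrative): given a biv I, an insn such as r = i * 4 + 20,
   or one whose REG_EQUAL note reduces to that form, makes R a giv with
   mult_val 4 and add_val 20.  */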
5393 static rtx
5394 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5395 struct loop *loop;
5396 rtx p;
5397 int not_every_iteration;
5398 int maybe_multiple;
5400 struct loop_regs *regs = LOOP_REGS (loop);
5402 rtx set;
5403 /* Look for a general induction variable in a register. */
5404 if (GET_CODE (p) == INSN
5405 && (set = single_set (p))
5406 && GET_CODE (SET_DEST (set)) == REG
5407 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5409 rtx src_reg;
5410 rtx dest_reg;
5411 rtx add_val;
5412 rtx mult_val;
5413 rtx ext_val;
5414 int benefit;
5415 rtx regnote = 0;
5416 rtx last_consec_insn;
5418 dest_reg = SET_DEST (set);
5419 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5420 return p;
5422 if (/* SET_SRC is a giv. */
5423 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5424 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5425 /* Equivalent expression is a giv. */
5426 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5427 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5428 &add_val, &mult_val, &ext_val, 0,
5429 &benefit, VOIDmode)))
5430 /* Don't try to handle any regs made by loop optimization.
5431 We have nothing on them in regno_first_uid, etc. */
5432 && REGNO (dest_reg) < max_reg_before_loop
5433 /* Don't recognize a BASIC_INDUCT_VAR here. */
5434 && dest_reg != src_reg
5435 /* This must be the only place where the register is set. */
5436 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5437 /* or all sets must be consecutive and make a giv. */
5438 || (benefit = consec_sets_giv (loop, benefit, p,
5439 src_reg, dest_reg,
5440 &add_val, &mult_val, &ext_val,
5441 &last_consec_insn))))
5443 struct induction *v
5444 = (struct induction *) xmalloc (sizeof (struct induction));
5446 /* If this is a library call, increase benefit. */
5447 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5448 benefit += libcall_benefit (p);
5450 /* Skip the consecutive insns, if there are any. */
5451 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5452 p = last_consec_insn;
5454 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5455 ext_val, benefit, DEST_REG, not_every_iteration,
5456 maybe_multiple, (rtx*) 0);
5461 #ifndef DONT_REDUCE_ADDR
5462 /* Look for givs which are memory addresses. */
5463 /* This resulted in worse code on a VAX 8600. I wonder if it
5464 still does. */
5465 if (GET_CODE (p) == INSN)
5466 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5467 maybe_multiple);
5468 #endif
5470 /* Update the status of whether giv can derive other givs. This can
5471 change when we pass a label or an insn that updates a biv. */
5472 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5473 || GET_CODE (p) == CODE_LABEL)
5474 update_giv_derive (loop, p);
5475 return p;
5478 /* Return 1 if X is a valid source for an initial value (or as value being
5479 compared against in an initial test).
5481 X must be either a register or constant and must not be clobbered between
5482 the current insn and the start of the loop.
5484 INSN is the insn containing X. */
5486 static int
5487 valid_initial_value_p (x, insn, call_seen, loop_start)
5488 rtx x;
5489 rtx insn;
5490 int call_seen;
5491 rtx loop_start;
5493 if (CONSTANT_P (x))
5494 return 1;
5496 /* Only consider pseudos we know about initialized in insns whose luids
5497 we know. */
5498 if (GET_CODE (x) != REG
5499 || REGNO (x) >= max_reg_before_loop)
5500 return 0;
5502 /* Don't use call-clobbered registers across a call which clobbers it. On
5503 some machines, don't use any hard registers at all. */
5504 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5505 && (SMALL_REGISTER_CLASSES
5506 || (call_used_regs[REGNO (x)] && call_seen)))
5507 return 0;
5509 /* Don't use registers that have been clobbered before the start of the
5510 loop. */
5511 if (reg_set_between_p (x, insn, loop_start))
5512 return 0;
5514 return 1;
5517 /* Scan X for memory refs and check each memory address
5518 as a possible giv. INSN is the insn whose pattern X comes from.
5519 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5520 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5521 more than once in each loop iteration. */
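/* For illustration (assuming 4-byte array elements): in a loop over `i',
   the store `a[i + 3] = 0' references the address `&a[0] + i*4 + 12',
   which is a linear function of the biv `i' and is therefore recorded
   here as a DEST_ADDR giv.  */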
5523 static void
5524 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5525 const struct loop *loop;
5526 rtx x;
5527 rtx insn;
5528 int not_every_iteration, maybe_multiple;
5530 int i, j;
5531 enum rtx_code code;
5532 const char *fmt;
5534 if (x == 0)
5535 return;
5537 code = GET_CODE (x);
5538 switch (code)
5540 case REG:
5541 case CONST_INT:
5542 case CONST:
5543 case CONST_DOUBLE:
5544 case SYMBOL_REF:
5545 case LABEL_REF:
5546 case PC:
5547 case CC0:
5548 case ADDR_VEC:
5549 case ADDR_DIFF_VEC:
5550 case USE:
5551 case CLOBBER:
5552 return;
5554 case MEM:
5556 rtx src_reg;
5557 rtx add_val;
5558 rtx mult_val;
5559 rtx ext_val;
5560 int benefit;
5562 /* This code used to disable creating GIVs with mult_val == 1 and
5563 add_val == 0. However, this leads to lost optimizations when
5564 it comes time to combine a set of related DEST_ADDR GIVs, since
5565 this one would not be seen. */
5567 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5568 &mult_val, &ext_val, 1, &benefit,
5569 GET_MODE (x)))
5571 /* Found one; record it. */
5572 struct induction *v
5573 = (struct induction *) xmalloc (sizeof (struct induction));
5575 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5576 add_val, ext_val, benefit, DEST_ADDR,
5577 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5579 v->mem = x;
5582 return;
5584 default:
5585 break;
5588 /* Recursively scan the subexpressions for other mem refs. */
5590 fmt = GET_RTX_FORMAT (code);
5591 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5592 if (fmt[i] == 'e')
5593 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5594 maybe_multiple);
5595 else if (fmt[i] == 'E')
5596 for (j = 0; j < XVECLEN (x, i); j++)
5597 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5598 maybe_multiple);
5601 /* Fill in the data about one biv update.
5602 V is the `struct induction' in which we record the biv. (It is
5603 allocated by the caller, with xmalloc.)
5604 INSN is the insn that sets it.
5605 DEST_REG is the biv's reg.
5607 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5608 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5609 being set to INC_VAL.
5611 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5612 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5613 can be executed more than once per iteration. If MAYBE_MULTIPLE
5614 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5615 executed exactly once per iteration. */
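/* For example (illustrative values): an increment `i = i + 4' is recorded
   with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), while a plain
   assignment `i = 10' within the loop is recorded with MULT_VAL ==
   const0_rtx and INC_VAL == (const_int 10).  */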
5617 static void
5618 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5619 not_every_iteration, maybe_multiple)
5620 struct loop *loop;
5621 struct induction *v;
5622 rtx insn;
5623 rtx dest_reg;
5624 rtx inc_val;
5625 rtx mult_val;
5626 rtx *location;
5627 int not_every_iteration;
5628 int maybe_multiple;
5630 struct loop_ivs *ivs = LOOP_IVS (loop);
5631 struct iv_class *bl;
5633 v->insn = insn;
5634 v->src_reg = dest_reg;
5635 v->dest_reg = dest_reg;
5636 v->mult_val = mult_val;
5637 v->add_val = inc_val;
5638 v->ext_dependent = NULL_RTX;
5639 v->location = location;
5640 v->mode = GET_MODE (dest_reg);
5641 v->always_computable = ! not_every_iteration;
5642 v->always_executed = ! not_every_iteration;
5643 v->maybe_multiple = maybe_multiple;
5645 /* Add this to the reg's iv_class, creating a class
5646 if this is the first incrementation of the reg. */
5648 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5649 if (bl == 0)
5651 /* Create and initialize new iv_class. */
5653 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5655 bl->regno = REGNO (dest_reg);
5656 bl->biv = 0;
5657 bl->giv = 0;
5658 bl->biv_count = 0;
5659 bl->giv_count = 0;
5661 /* Set initial value to the reg itself. */
5662 bl->initial_value = dest_reg;
5663 bl->final_value = 0;
5664 /* We haven't seen the initializing insn yet */
5665 bl->init_insn = 0;
5666 bl->init_set = 0;
5667 bl->initial_test = 0;
5668 bl->incremented = 0;
5669 bl->eliminable = 0;
5670 bl->nonneg = 0;
5671 bl->reversed = 0;
5672 bl->total_benefit = 0;
5674 /* Add this class to ivs->list. */
5675 bl->next = ivs->list;
5676 ivs->list = bl;
5678 /* Put it in the array of biv register classes. */
5679 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5682 /* Update IV_CLASS entry for this biv. */
5683 v->next_iv = bl->biv;
5684 bl->biv = v;
5685 bl->biv_count++;
5686 if (mult_val == const1_rtx)
5687 bl->incremented = 1;
5689 if (loop_dump_stream)
5690 loop_biv_dump (v, loop_dump_stream, 0);
5693 /* Fill in the data about one giv.
5694 V is the `struct induction' in which we record the giv. (It is
5695 allocated by the caller, with xmalloc.)
5696 INSN is the insn that sets it.
5697 BENEFIT estimates the savings from deleting this insn.
5698 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5699 into a register or is used as a memory address.
5701 SRC_REG is the biv reg which the giv is computed from.
5702 DEST_REG is the giv's reg (if the giv is stored in a reg).
5703 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5704 LOCATION points to the place where this giv's value appears in INSN. */
5706 static void
5707 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5708 benefit, type, not_every_iteration, maybe_multiple, location)
5709 const struct loop *loop;
5710 struct induction *v;
5711 rtx insn;
5712 rtx src_reg;
5713 rtx dest_reg;
5714 rtx mult_val, add_val, ext_val;
5715 int benefit;
5716 enum g_types type;
5717 int not_every_iteration, maybe_multiple;
5718 rtx *location;
5720 struct loop_ivs *ivs = LOOP_IVS (loop);
5721 struct induction *b;
5722 struct iv_class *bl;
5723 rtx set = single_set (insn);
5724 rtx temp;
5726 /* Attempt to prove constancy of the values. Don't let simplify_rtx
5727 undo the MULT canonicalization that we performed earlier. */
5728 temp = simplify_rtx (add_val);
5729 if (temp
5730 && ! (GET_CODE (add_val) == MULT
5731 && GET_CODE (temp) == ASHIFT))
5732 add_val = temp;
5734 v->insn = insn;
5735 v->src_reg = src_reg;
5736 v->giv_type = type;
5737 v->dest_reg = dest_reg;
5738 v->mult_val = mult_val;
5739 v->add_val = add_val;
5740 v->ext_dependent = ext_val;
5741 v->benefit = benefit;
5742 v->location = location;
5743 v->cant_derive = 0;
5744 v->combined_with = 0;
5745 v->maybe_multiple = maybe_multiple;
5746 v->maybe_dead = 0;
5747 v->derive_adjustment = 0;
5748 v->same = 0;
5749 v->ignore = 0;
5750 v->new_reg = 0;
5751 v->final_value = 0;
5752 v->same_insn = 0;
5753 v->auto_inc_opt = 0;
5754 v->unrolled = 0;
5755 v->shared = 0;
5757 /* The v->always_computable field is used in update_giv_derive, to
5758 determine whether a giv can be used to derive another giv. For a
5759 DEST_REG giv, INSN computes a new value for the giv, so its value
5760 isn't computable if INSN isn't executed every iteration.
5761 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5762 it does not compute a new value. Hence the value is always computable
5763 regardless of whether INSN is executed each iteration. */
5765 if (type == DEST_ADDR)
5766 v->always_computable = 1;
5767 else
5768 v->always_computable = ! not_every_iteration;
5770 v->always_executed = ! not_every_iteration;
5772 if (type == DEST_ADDR)
5774 v->mode = GET_MODE (*location);
5775 v->lifetime = 1;
5777 else /* type == DEST_REG */
5779 v->mode = GET_MODE (SET_DEST (set));
5781 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5783 /* If the lifetime is zero, it means that this register is
5784 really a dead store. So mark this as a giv that can be
5785 ignored. This will not prevent the biv from being eliminated. */
5786 if (v->lifetime == 0)
5787 v->ignore = 1;
5789 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5790 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5793 /* Add the giv to the class of givs computed from one biv. */
5795 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5796 if (bl)
5798 v->next_iv = bl->giv;
5799 bl->giv = v;
5800 /* Don't count DEST_ADDR. This is supposed to count the number of
5801 insns that calculate givs. */
5802 if (type == DEST_REG)
5803 bl->giv_count++;
5804 bl->total_benefit += benefit;
5806 else
5807 /* Fatal error, biv missing for this giv? */
5808 abort ();
5810 if (type == DEST_ADDR)
5811 v->replaceable = 1;
5812 else
5814 /* The giv can be replaced outright by the reduced register only if all
5815 of the following conditions are true:
5816 - the insn that sets the giv is always executed on any iteration
5817 on which the giv is used at all
5818 (there are two ways to deduce this:
5819 either the insn is executed on every iteration,
5820 or all uses follow that insn in the same basic block),
5821 - the giv is not used outside the loop
5822 - no assignments to the biv occur during the giv's lifetime. */
5824 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5825 /* Previous line always fails if INSN was moved by loop opt. */
5826 && REGNO_LAST_LUID (REGNO (dest_reg))
5827 < INSN_LUID (loop->end)
5828 && (! not_every_iteration
5829 || last_use_this_basic_block (dest_reg, insn)))
5831 /* Now check that there are no assignments to the biv within the
5832 giv's lifetime. This requires two separate checks. */
5834 /* Check each biv update, and fail if any are between the first
5835 and last use of the giv.
5837 If this loop contains an inner loop that was unrolled, then
5838 the insn modifying the biv may have been emitted by the loop
5839 unrolling code, and hence does not have a valid luid. Just
5840 mark the biv as not replaceable in this case. It is not very
5841 useful as a biv, because it is used in two different loops.
5842 It is very unlikely that we would be able to optimize the giv
5843 using this biv anyway. */
5845 v->replaceable = 1;
5846 for (b = bl->biv; b; b = b->next_iv)
5848 if (INSN_UID (b->insn) >= max_uid_for_loop
5849 || ((INSN_LUID (b->insn)
5850 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5851 && (INSN_LUID (b->insn)
5852 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5854 v->replaceable = 0;
5855 v->not_replaceable = 1;
5856 break;
5860 /* If there are any backwards branches that go from after the
5861 biv update to before it, then this giv is not replaceable. */
5862 if (v->replaceable)
5863 for (b = bl->biv; b; b = b->next_iv)
5864 if (back_branch_in_range_p (loop, b->insn))
5866 v->replaceable = 0;
5867 v->not_replaceable = 1;
5868 break;
5871 else
5873 /* May still be replaceable, we don't have enough info here to
5874 decide. */
5875 v->replaceable = 0;
5876 v->not_replaceable = 0;
5880 /* Record whether the add_val contains a const_int, for later use by
5881 combine_givs. */
5883 rtx tem = add_val;
5885 v->no_const_addval = 1;
5886 if (tem == const0_rtx)
5888 else if (CONSTANT_P (add_val))
5889 v->no_const_addval = 0;
5890 if (GET_CODE (tem) == PLUS)
5892 while (1)
5894 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5895 tem = XEXP (tem, 0);
5896 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5897 tem = XEXP (tem, 1);
5898 else
5899 break;
5901 if (CONSTANT_P (XEXP (tem, 1)))
5902 v->no_const_addval = 0;
5906 if (loop_dump_stream)
5907 loop_giv_dump (v, loop_dump_stream, 0);
5910 /* All this does is determine whether a giv can be made replaceable because
5911 its final value can be calculated. This code can not be part of record_giv
5912 above, because final_giv_value requires that the number of loop iterations
5913 be known, and that can not be accurately calculated until after all givs
5914 have been identified. */
5916 static void
5917 check_final_value (loop, v)
5918 const struct loop *loop;
5919 struct induction *v;
5921 struct loop_ivs *ivs = LOOP_IVS (loop);
5922 struct iv_class *bl;
5923 rtx final_value = 0;
5925 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5927 /* DEST_ADDR givs will never reach here, because they are always marked
5928 replaceable above in record_giv. */
5930 /* The giv can be replaced outright by the reduced register only if all
5931 of the following conditions are true:
5932 - the insn that sets the giv is always executed on any iteration
5933 on which the giv is used at all
5934 (there are two ways to deduce this:
5935 either the insn is executed on every iteration,
5936 or all uses follow that insn in the same basic block),
5937 - its final value can be calculated (this condition is different
5938 than the one above in record_giv)
5939 - it's not used before it's set
5940 - no assignments to the biv occur during the giv's lifetime. */
5942 #if 0
5943 /* This is only called now when replaceable is known to be false. */
5944 /* Clear replaceable, so that it won't confuse final_giv_value. */
5945 v->replaceable = 0;
5946 #endif
5948 if ((final_value = final_giv_value (loop, v))
5949 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5951 int biv_increment_seen = 0, before_giv_insn = 0;
5952 rtx p = v->insn;
5953 rtx last_giv_use;
5955 v->replaceable = 1;
5957 /* When trying to determine whether or not a biv increment occurs
5958 during the lifetime of the giv, we can ignore uses of the variable
5959 outside the loop because final_value is true. Hence we can not
5960 use regno_last_uid and regno_first_uid as above in record_giv. */
5962 /* Search the loop to determine whether any assignments to the
5963 biv occur during the giv's lifetime. Start with the insn
5964 that sets the giv, and search around the loop until we come
5965 back to that insn again.
5967 Also fail if there is a jump within the giv's lifetime that jumps
5968 to somewhere outside the lifetime but still within the loop. This
5969 catches spaghetti code where the execution order is not linear, and
5970 hence the above test fails. Here we assume that the giv lifetime
5971 does not extend from one iteration of the loop to the next, so as
5972 to make the test easier. Since the lifetime isn't known yet,
5973 this requires two loops. See also record_giv above. */
5975 last_giv_use = v->insn;
5977 while (1)
5979 p = NEXT_INSN (p);
5980 if (p == loop->end)
5982 before_giv_insn = 1;
5983 p = NEXT_INSN (loop->start);
5985 if (p == v->insn)
5986 break;
5988 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5989 || GET_CODE (p) == CALL_INSN)
5991 /* It is possible for the BIV increment to use the GIV if we
5992 have a cycle. Thus we must be sure to check each insn for
5993 both BIV and GIV uses, and we must check for BIV uses
5994 first. */
5996 if (! biv_increment_seen
5997 && reg_set_p (v->src_reg, PATTERN (p)))
5998 biv_increment_seen = 1;
6000 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6002 if (biv_increment_seen || before_giv_insn)
6004 v->replaceable = 0;
6005 v->not_replaceable = 1;
6006 break;
6008 last_giv_use = p;
6013 /* Now that the lifetime of the giv is known, check for branches
6014 from within the lifetime to outside the lifetime if it is still
6015 replaceable. */
6017 if (v->replaceable)
6019 p = v->insn;
6020 while (1)
6022 p = NEXT_INSN (p);
6023 if (p == loop->end)
6024 p = NEXT_INSN (loop->start);
6025 if (p == last_giv_use)
6026 break;
6028 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6029 && LABEL_NAME (JUMP_LABEL (p))
6030 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6031 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6032 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6033 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6035 v->replaceable = 0;
6036 v->not_replaceable = 1;
6038 if (loop_dump_stream)
6039 fprintf (loop_dump_stream,
6040 "Found branch outside giv lifetime.\n");
6042 break;
6047 /* If it is replaceable, then save the final value. */
6048 if (v->replaceable)
6049 v->final_value = final_value;
6052 if (loop_dump_stream && v->replaceable)
6053 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6054 INSN_UID (v->insn), REGNO (v->dest_reg));
6057 /* Update the status of whether a giv can derive other givs.
6059 We need to do something special if there is or may be an update to the biv
6060 between the time the giv is defined and the time it is used to derive
6061 another giv.
6063 In addition, a giv that is only conditionally set is not allowed to
6064 derive another giv once a label has been passed.
6066 The cases we look at are when a label or an update to a biv is passed. */
6068 static void
6069 update_giv_derive (loop, p)
6070 const struct loop *loop;
6071 rtx p;
6073 struct loop_ivs *ivs = LOOP_IVS (loop);
6074 struct iv_class *bl;
6075 struct induction *biv, *giv;
6076 rtx tem;
6077 int dummy;
6079 /* Search all IV classes, then all bivs, and finally all givs.
6081 There are three cases we are concerned with. First we have the situation
6082 of a giv that is only updated conditionally. In that case, it may not
6083 derive any givs after a label is passed.
6085 The second case is when a biv update occurs, or may occur, after the
6086 definition of a giv. For certain biv updates (see below) that are
6087 known to occur between the giv definition and use, we can adjust the
6088 giv definition. For others, or when the biv update is conditional,
6089 we must prevent the giv from deriving any other givs. There are two
6090 sub-cases within this case.
6092 If this is a label, we are concerned with any biv update that is done
6093 conditionally, since it may be done after the giv is defined followed by
6094 a branch here (actually, we need to pass both a jump and a label, but
6095 this extra tracking doesn't seem worth it).
6097 If this is a jump, we are concerned about any biv update that may be
6098 executed multiple times. We are actually only concerned about
6099 backward jumps, but it is probably not worth performing the test
6100 on the jump again here.
6102 If this is a biv update, we must adjust the giv status to show that a
6103 subsequent biv update was performed. If this adjustment cannot be done,
6104 the giv cannot derive further givs. */
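/* As a sketch (made-up values): if a giv was recorded as g = b * 3 + 1 and
   we now pass the biv update `b = b + 2', then givs derived from g after
   this point need a compensation of 3 * 2 == 6 -- the product of
   biv->add_val and giv->mult_val computed below and remembered in
   derive_adjustment.  */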
6106 for (bl = ivs->list; bl; bl = bl->next)
6107 for (biv = bl->biv; biv; biv = biv->next_iv)
6108 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6109 || biv->insn == p)
6111 for (giv = bl->giv; giv; giv = giv->next_iv)
6113 /* If cant_derive is already true, there is no point in
6114 checking all of these conditions again. */
6115 if (giv->cant_derive)
6116 continue;
6118 /* If this giv is conditionally set and we have passed a label,
6119 it cannot derive anything. */
6120 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6121 giv->cant_derive = 1;
6123 /* Skip givs that have mult_val == 0, since
6124 they are really invariants. Also skip those that are
6125 replaceable, since we know their lifetime doesn't contain
6126 any biv update. */
6127 else if (giv->mult_val == const0_rtx || giv->replaceable)
6128 continue;
6130 /* The only way we can allow this giv to derive another
6131 is if this is a biv increment and we can form the product
6132 of biv->add_val and giv->mult_val. In this case, we will
6133 be able to compute a compensation. */
6134 else if (biv->insn == p)
6136 rtx ext_val_dummy;
6138 tem = 0;
6139 if (biv->mult_val == const1_rtx)
6140 tem = simplify_giv_expr (loop,
6141 gen_rtx_MULT (giv->mode,
6142 biv->add_val,
6143 giv->mult_val),
6144 &ext_val_dummy, &dummy);
6146 if (tem && giv->derive_adjustment)
6147 tem = simplify_giv_expr
6148 (loop,
6149 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6150 &ext_val_dummy, &dummy);
6152 if (tem)
6153 giv->derive_adjustment = tem;
6154 else
6155 giv->cant_derive = 1;
6157 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6158 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6159 giv->cant_derive = 1;
6164 /* Check whether an insn is an increment legitimate for a basic induction var.
6165 X is the source of insn P, or a part of it.
6166 MODE is the mode in which X should be interpreted.
6168 DEST_REG is the putative biv, also the destination of the insn.
6169 We accept patterns of these forms:
6170 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6171 REG = INVARIANT + REG
6173 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6174 store the additive term into *INC_VAL, and store the place where
6175 we found the additive term into *LOCATION.
6177 If X is an assignment of an invariant into DEST_REG, we set
6178 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6180 We also want to detect a BIV when it corresponds to a variable
6181 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6182 of the variable may be a PLUS that adds a SUBREG of that variable to
6183 an invariant and then sign- or zero-extends the result of the PLUS
6184 into the variable.
6186 Most GIVs in such cases will be in the promoted mode, since that is
6187 probably the natural computation mode (and almost certainly the mode
6188 used for addresses) on the machine. So we view the pseudo-reg containing
6189 the variable as the BIV, as if it were simply incremented.
6191 Note that treating the entire pseudo as a BIV will result in making
6192 simple increments to any GIVs based on it. However, if the variable
6193 overflows in its declared mode but not its promoted mode, the result will
6194 be incorrect. This is acceptable if the variable is signed, since
6195 overflows in such cases are undefined, but not if it is unsigned, since
6196 those overflows are defined. So we only check for SIGN_EXTEND and
6197 not ZERO_EXTEND.
6199 If we cannot find a biv, we return 0. */
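/* For example (register number is illustrative), the source
        (plus (reg 100) (const_int -1))
   for an insn setting (reg 100) matches the first form: we return 1 with
   *MULT_VAL == const1_rtx, *INC_VAL == (const_int -1), and *LOCATION
   pointing at the second operand of the PLUS.  */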
6201 static int
6202 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6203 const struct loop *loop;
6204 rtx x;
6205 enum machine_mode mode;
6206 rtx dest_reg;
6207 rtx p;
6208 rtx *inc_val;
6209 rtx *mult_val;
6210 rtx **location;
6212 enum rtx_code code;
6213 rtx *argp, arg;
6214 rtx insn, set = 0;
6216 code = GET_CODE (x);
6217 *location = NULL;
6218 switch (code)
6220 case PLUS:
6221 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6222 || (GET_CODE (XEXP (x, 0)) == SUBREG
6223 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6224 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6226 argp = &XEXP (x, 1);
6228 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6229 || (GET_CODE (XEXP (x, 1)) == SUBREG
6230 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6231 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6233 argp = &XEXP (x, 0);
6235 else
6236 return 0;
6238 arg = *argp;
6239 if (loop_invariant_p (loop, arg) != 1)
6240 return 0;
6242 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6243 *mult_val = const1_rtx;
6244 *location = argp;
6245 return 1;
6247 case SUBREG:
6248 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6249 handle addition of promoted variables.
6250 ??? The comment at the start of this function is wrong: promoted
6251 variable increments don't look like it says they do. */
6252 return basic_induction_var (loop, SUBREG_REG (x),
6253 GET_MODE (SUBREG_REG (x)),
6254 dest_reg, p, inc_val, mult_val, location);
6256 case REG:
6257 /* If this register is assigned in a previous insn, look at its
6258 source, but don't go outside the loop or past a label. */
6260 /* If this sets a register to itself, we would repeat any previous
6261 biv increment if we applied this strategy blindly. */
6262 if (rtx_equal_p (dest_reg, x))
6263 return 0;
6265 insn = p;
6266 while (1)
6268 rtx dest;
6271 insn = PREV_INSN (insn);
6273 while (insn && GET_CODE (insn) == NOTE
6274 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6276 if (!insn)
6277 break;
6278 set = single_set (insn);
6279 if (set == 0)
6280 break;
6281 dest = SET_DEST (set);
6282 if (dest == x
6283 || (GET_CODE (dest) == SUBREG
6284 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6285 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6286 && SUBREG_REG (dest) == x))
6287 return basic_induction_var (loop, SET_SRC (set),
6288 (GET_MODE (SET_SRC (set)) == VOIDmode
6289 ? GET_MODE (x)
6290 : GET_MODE (SET_SRC (set))),
6291 dest_reg, insn,
6292 inc_val, mult_val, location);
6294 while (GET_CODE (dest) == SIGN_EXTRACT
6295 || GET_CODE (dest) == ZERO_EXTRACT
6296 || GET_CODE (dest) == SUBREG
6297 || GET_CODE (dest) == STRICT_LOW_PART)
6298 dest = XEXP (dest, 0);
6299 if (dest == x)
6300 break;
6302 /* Fall through. */
6304 /* Can accept constant setting of biv only when inside inner most loop.
6305 Otherwise, a biv of an inner loop may be incorrectly recognized
6306 as a biv of the outer loop,
6307 causing code to be moved INTO the inner loop. */
6308 case MEM:
6309 if (loop_invariant_p (loop, x) != 1)
6310 return 0;
6311 case CONST_INT:
6312 case SYMBOL_REF:
6313 case CONST:
6314 /* convert_modes aborts if we try to convert to or from CCmode, so just
6315 exclude that case. It is very unlikely that a condition code value
6316 would be a useful iterator anyway. convert_modes aborts if we try to
6317 convert a float mode to non-float or vice versa too. */
6318 if (loop->level == 1
6319 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6320 && GET_MODE_CLASS (mode) != MODE_CC)
6322 /* Possible bug here? Perhaps we don't know the mode of X. */
6323 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6324 *mult_val = const0_rtx;
6325 return 1;
6327 else
6328 return 0;
6330 case SIGN_EXTEND:
6331 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6332 dest_reg, p, inc_val, mult_val, location);
6334 case ASHIFTRT:
6335 /* Similar, since this can be a sign extension. */
6336 for (insn = PREV_INSN (p);
6337 (insn && GET_CODE (insn) == NOTE
6338 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6339 insn = PREV_INSN (insn))
6342 if (insn)
6343 set = single_set (insn);
6345 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6346 && set && SET_DEST (set) == XEXP (x, 0)
6347 && GET_CODE (XEXP (x, 1)) == CONST_INT
6348 && INTVAL (XEXP (x, 1)) >= 0
6349 && GET_CODE (SET_SRC (set)) == ASHIFT
6350 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6351 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6352 GET_MODE (XEXP (x, 0)),
6353 dest_reg, insn, inc_val, mult_val,
6354 location);
6355 return 0;
6357 default:
6358 return 0;
6362 /* A general induction variable (giv) is any quantity that is a linear
6363 function of a basic induction variable,
6364 i.e. giv = biv * mult_val + add_val.
6365 The coefficients can be any loop invariant quantity.
6366 A giv need not be computed directly from the biv;
6367 it can be computed by way of other givs. */
6369 /* Determine whether X computes a giv.
6370 If it does, return a nonzero value
6371 which is the benefit from eliminating the computation of X;
6372 set *SRC_REG to the register of the biv that it is computed from;
6373 set *ADD_VAL and *MULT_VAL to the coefficients,
6374 such that the value of X is biv * mult + add; */
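/* For example (illustrative rtl): if X simplifies to
        (plus (mult (reg i) (const_int 4)) (const_int 3))
   where (reg i) is a biv, we return nonzero with *SRC_REG == (reg i),
   *MULT_VAL == (const_int 4) and *ADD_VAL == (const_int 3).  */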
6376 static int
6377 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6378 is_addr, pbenefit, addr_mode)
6379 const struct loop *loop;
6380 rtx x;
6381 rtx *src_reg;
6382 rtx *add_val;
6383 rtx *mult_val;
6384 rtx *ext_val;
6385 int is_addr;
6386 int *pbenefit;
6387 enum machine_mode addr_mode;
6389 struct loop_ivs *ivs = LOOP_IVS (loop);
6390 rtx orig_x = x;
6392 /* If this is an invariant, forget it, it isn't a giv. */
6393 if (loop_invariant_p (loop, x) == 1)
6394 return 0;
6396 *pbenefit = 0;
6397 *ext_val = NULL_RTX;
6398 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6399 if (x == 0)
6400 return 0;
6402 switch (GET_CODE (x))
6404 case USE:
6405 case CONST_INT:
6406 /* Since this is now an invariant and wasn't before, it must be a giv
6407 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6408 with. */
6409 *src_reg = ivs->list->biv->dest_reg;
6410 *mult_val = const0_rtx;
6411 *add_val = x;
6412 break;
6414 case REG:
6415 /* This is equivalent to a BIV. */
6416 *src_reg = x;
6417 *mult_val = const1_rtx;
6418 *add_val = const0_rtx;
6419 break;
6421 case PLUS:
6422 /* Either (plus (biv) (invar)) or
6423 (plus (mult (biv) (invar_1)) (invar_2)). */
6424 if (GET_CODE (XEXP (x, 0)) == MULT)
6426 *src_reg = XEXP (XEXP (x, 0), 0);
6427 *mult_val = XEXP (XEXP (x, 0), 1);
6429 else
6431 *src_reg = XEXP (x, 0);
6432 *mult_val = const1_rtx;
6434 *add_val = XEXP (x, 1);
6435 break;
6437 case MULT:
6438 /* ADD_VAL is zero. */
6439 *src_reg = XEXP (x, 0);
6440 *mult_val = XEXP (x, 1);
6441 *add_val = const0_rtx;
6442 break;
6444 default:
6445 abort ();
6448 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6449 unless they are CONST_INT). */
6450 if (GET_CODE (*add_val) == USE)
6451 *add_val = XEXP (*add_val, 0);
6452 if (GET_CODE (*mult_val) == USE)
6453 *mult_val = XEXP (*mult_val, 0);
6455 if (is_addr)
6456 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6457 else
6458 *pbenefit += rtx_cost (orig_x, SET);
6460 /* Always return true if this is a giv so it will be detected as such,
6461 even if the benefit is zero or negative. This allows elimination
6462 of bivs that might otherwise not be eliminated. */
6463 return 1;
6466 /* Given an expression, X, try to form it as a linear function of a biv.
6467 We will canonicalize it to be of the form
6468 (plus (mult (BIV) (invar_1))
6469 (invar_2))
6470 with possible degeneracies.
6472 The invariant expressions must each be of a form that can be used as a
6473 machine operand. We surround them with a USE rtx (a hack, but localized
6474 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6475 routine; it is the caller's responsibility to strip them.
6477 If no such canonicalization is possible (i.e., two biv's are used or an
6478 expression that is neither invariant nor a biv or giv), this routine
6479 returns 0.
6481 For a non-zero return, the result will have a code of CONST_INT, USE,
6482 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6484 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
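/* As a worked example (using a biv register `i'): an rtx such as
        (mult (plus (reg i) (const_int -3)) (const_int 4))
   i.e. (i - 3) * 4, canonicalizes here to
        (plus (mult (reg i) (const_int 4)) (const_int -12))
   -- the multiplication is distributed over the PLUS and the constants
   are folded.  */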
6486 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6487 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6489 static rtx
6490 simplify_giv_expr (loop, x, ext_val, benefit)
6491 const struct loop *loop;
6492 rtx x;
6493 rtx *ext_val;
6494 int *benefit;
6496 struct loop_ivs *ivs = LOOP_IVS (loop);
6497 struct loop_regs *regs = LOOP_REGS (loop);
6498 enum machine_mode mode = GET_MODE (x);
6499 rtx arg0, arg1;
6500 rtx tem;
6502 /* If this is not an integer mode, or if we cannot do arithmetic in this
6503 mode, this can't be a giv. */
6504 if (mode != VOIDmode
6505 && (GET_MODE_CLASS (mode) != MODE_INT
6506 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6507 return NULL_RTX;
6509 switch (GET_CODE (x))
6511 case PLUS:
6512 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6513 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6514 if (arg0 == 0 || arg1 == 0)
6515 return NULL_RTX;
6517 /* Put constant last, CONST_INT last if both constant. */
6518 if ((GET_CODE (arg0) == USE
6519 || GET_CODE (arg0) == CONST_INT)
6520 && ! ((GET_CODE (arg0) == USE
6521 && GET_CODE (arg1) == USE)
6522 || GET_CODE (arg1) == CONST_INT))
6523 tem = arg0, arg0 = arg1, arg1 = tem;
6525 /* Handle addition of zero, then addition of an invariant. */
6526 if (arg1 == const0_rtx)
6527 return arg0;
6528 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6529 switch (GET_CODE (arg0))
6531 case CONST_INT:
6532 case USE:
6533 /* Adding two invariants must result in an invariant, so enclose
6534 addition operation inside a USE and return it. */
6535 if (GET_CODE (arg0) == USE)
6536 arg0 = XEXP (arg0, 0);
6537 if (GET_CODE (arg1) == USE)
6538 arg1 = XEXP (arg1, 0);
6540 if (GET_CODE (arg0) == CONST_INT)
6541 tem = arg0, arg0 = arg1, arg1 = tem;
6542 if (GET_CODE (arg1) == CONST_INT)
6543 tem = sge_plus_constant (arg0, arg1);
6544 else
6545 tem = sge_plus (mode, arg0, arg1);
6547 if (GET_CODE (tem) != CONST_INT)
6548 tem = gen_rtx_USE (mode, tem);
6549 return tem;
6551 case REG:
6552 case MULT:
6553 /* biv + invar or mult + invar. Return sum. */
6554 return gen_rtx_PLUS (mode, arg0, arg1);
6556 case PLUS:
6557 /* (a + invar_1) + invar_2. Associate. */
6558 return
6559 simplify_giv_expr (loop,
6560 gen_rtx_PLUS (mode,
6561 XEXP (arg0, 0),
6562 gen_rtx_PLUS (mode,
6563 XEXP (arg0, 1),
6564 arg1)),
6565 ext_val, benefit);
6567 default:
6568 abort ();
6571 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6572 MULT to reduce cases. */
6573 if (GET_CODE (arg0) == REG)
6574 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6575 if (GET_CODE (arg1) == REG)
6576 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6578 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6579 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6580 Recurse to associate the second PLUS. */
6581 if (GET_CODE (arg1) == MULT)
6582 tem = arg0, arg0 = arg1, arg1 = tem;
6584 if (GET_CODE (arg1) == PLUS)
6585 return
6586 simplify_giv_expr (loop,
6587 gen_rtx_PLUS (mode,
6588 gen_rtx_PLUS (mode, arg0,
6589 XEXP (arg1, 0)),
6590 XEXP (arg1, 1)),
6591 ext_val, benefit);
6593 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6594 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6595 return NULL_RTX;
6597 if (!rtx_equal_p (arg0, arg1))
6598 return NULL_RTX;
6600 return simplify_giv_expr (loop,
6601 gen_rtx_MULT (mode,
6602 XEXP (arg0, 0),
6603 gen_rtx_PLUS (mode,
6604 XEXP (arg0, 1),
6605 XEXP (arg1, 1))),
6606 ext_val, benefit);
6608 case MINUS:
6609 /* Handle "a - b" as "a + b * (-1)". */
6610 return simplify_giv_expr (loop,
6611 gen_rtx_PLUS (mode,
6612 XEXP (x, 0),
6613 gen_rtx_MULT (mode,
6614 XEXP (x, 1),
6615 constm1_rtx)),
6616 ext_val, benefit);
6618 case MULT:
6619 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6620 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6621 if (arg0 == 0 || arg1 == 0)
6622 return NULL_RTX;
6624 /* Put constant last, CONST_INT last if both constant. */
6625 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6626 && GET_CODE (arg1) != CONST_INT)
6627 tem = arg0, arg0 = arg1, arg1 = tem;
6629 /* If second argument is not now constant, not giv. */
6630 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6631 return NULL_RTX;
6633 /* Handle multiply by 0 or 1. */
6634 if (arg1 == const0_rtx)
6635 return const0_rtx;
6637 else if (arg1 == const1_rtx)
6638 return arg0;
6640 switch (GET_CODE (arg0))
6642 case REG:
6643 /* biv * invar. Done. */
6644 return gen_rtx_MULT (mode, arg0, arg1);
6646 case CONST_INT:
6647 /* Product of two constants. */
6648 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6650 case USE:
6651 /* invar * invar is a giv, but attempt to simplify it somehow. */
6652 if (GET_CODE (arg1) != CONST_INT)
6653 return NULL_RTX;
6655 arg0 = XEXP (arg0, 0);
6656 if (GET_CODE (arg0) == MULT)
6658 /* (invar_0 * invar_1) * invar_2. Associate. */
6659 return simplify_giv_expr (loop,
6660 gen_rtx_MULT (mode,
6661 XEXP (arg0, 0),
6662 gen_rtx_MULT (mode,
6663 XEXP (arg0,
6665 arg1)),
6666 ext_val, benefit);
6668 /* Propagate the MULT expressions to the innermost nodes. */
6669 else if (GET_CODE (arg0) == PLUS)
6671 /* (invar_0 + invar_1) * invar_2. Distribute. */
6672 return simplify_giv_expr (loop,
6673 gen_rtx_PLUS (mode,
6674 gen_rtx_MULT (mode,
6675 XEXP (arg0,
6677 arg1),
6678 gen_rtx_MULT (mode,
6679 XEXP (arg0,
6681 arg1)),
6682 ext_val, benefit);
6684 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6686 case MULT:
6687 /* (a * invar_1) * invar_2. Associate. */
6688 return simplify_giv_expr (loop,
6689 gen_rtx_MULT (mode,
6690 XEXP (arg0, 0),
6691 gen_rtx_MULT (mode,
6692 XEXP (arg0, 1),
6693 arg1)),
6694 ext_val, benefit);
6696 case PLUS:
6697 /* (a + invar_1) * invar_2. Distribute. */
6698 return simplify_giv_expr (loop,
6699 gen_rtx_PLUS (mode,
6700 gen_rtx_MULT (mode,
6701 XEXP (arg0, 0),
6702 arg1),
6703 gen_rtx_MULT (mode,
6704 XEXP (arg0, 1),
6705 arg1)),
6706 ext_val, benefit);
6708 default:
6709 abort ();
6712 case ASHIFT:
6713 /* Shift by constant is multiply by power of two. */
6714 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6715 return 0;
6717 return
6718 simplify_giv_expr (loop,
6719 gen_rtx_MULT (mode,
6720 XEXP (x, 0),
6721 GEN_INT ((HOST_WIDE_INT) 1
6722 << INTVAL (XEXP (x, 1)))),
6723 ext_val, benefit);
6725 case NEG:
6726 /* "-a" is "a * (-1)" */
6727 return simplify_giv_expr (loop,
6728 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6729 ext_val, benefit);
6731 case NOT:
6732 /* "~a" is "-a - 1". Silly, but easy. */
6733 return simplify_giv_expr (loop,
6734 gen_rtx_MINUS (mode,
6735 gen_rtx_NEG (mode, XEXP (x, 0)),
6736 const1_rtx),
6737 ext_val, benefit);
6739 case USE:
6740 /* Already in proper form for invariant. */
6741 return x;
6743 case SIGN_EXTEND:
6744 case ZERO_EXTEND:
6745 case TRUNCATE:
6746 /* Conditionally recognize extensions of simple IVs. After we've
6747 computed loop traversal counts and verified the range of the
6748 source IV, we'll reevaluate this as a GIV. */
6749 if (*ext_val == NULL_RTX)
6751 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6752 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6754 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6755 return arg0;
6758 goto do_default;
6760 case REG:
6761 /* If this is a new register, we can't deal with it. */
6762 if (REGNO (x) >= max_reg_before_loop)
6763 return 0;
6765 /* Check for biv or giv. */
6766 switch (REG_IV_TYPE (ivs, REGNO (x)))
6768 case BASIC_INDUCT:
6769 return x;
6770 case GENERAL_INDUCT:
6772 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6774 /* Form expression from giv and add benefit. Ensure this giv
6775 can derive another and subtract any needed adjustment if so. */
6777 /* Increasing the benefit here is risky. The only case in which it
6778 is arguably correct is if this is the only use of V. In other
6779 cases, this will artificially inflate the benefit of the current
6780 giv, and lead to suboptimal code. Thus, it is disabled, since
6781 potentially not reducing an only marginally beneficial giv is
6782 less harmful than reducing many givs that are not really
6783 beneficial. */
6785 rtx single_use = regs->array[REGNO (x)].single_usage;
6786 if (single_use && single_use != const0_rtx)
6787 *benefit += v->benefit;
6790 if (v->cant_derive)
6791 return 0;
6793 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6794 v->src_reg, v->mult_val),
6795 v->add_val);
6797 if (v->derive_adjustment)
6798 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6799 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6800 if (*ext_val)
6802 if (!v->ext_dependent)
6803 return arg0;
6805 else
6807 *ext_val = v->ext_dependent;
6808 return arg0;
6810 return 0;
6813 default:
6814 do_default:
6815 /* If it isn't an induction variable, and it is invariant, we
6816 may be able to simplify things further by looking through
6817 the bits we just moved outside the loop. */
6818 if (loop_invariant_p (loop, x) == 1)
6820 struct movable *m;
6821 struct loop_movables *movables = LOOP_MOVABLES (loop);
6823 for (m = movables->head; m; m = m->next)
6824 if (rtx_equal_p (x, m->set_dest))
6826 /* Ok, we found a match. Substitute and simplify. */
6828 /* If we match another movable, we must use that, as
6829 this one is going away. */
6830 if (m->match)
6831 return simplify_giv_expr (loop, m->match->set_dest,
6832 ext_val, benefit);
6834 /* If consec is non-zero, this is a member of a group of
6835 instructions that were moved together. We handle this
6836 case only to the point of seeking to the last insn and
6837 looking for a REG_EQUAL. Fail if we don't find one. */
6838 if (m->consec != 0)
6840 int i = m->consec;
6841 tem = m->insn;
6844 tem = NEXT_INSN (tem);
6846 while (--i > 0);
6848 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6849 if (tem)
6850 tem = XEXP (tem, 0);
6852 else
6854 tem = single_set (m->insn);
6855 if (tem)
6856 tem = SET_SRC (tem);
6859 if (tem)
6861 /* What we are most interested in is pointer
6862 arithmetic on invariants -- only take
6863 patterns we may be able to do something with. */
6864 if (GET_CODE (tem) == PLUS
6865 || GET_CODE (tem) == MULT
6866 || GET_CODE (tem) == ASHIFT
6867 || GET_CODE (tem) == CONST_INT
6868 || GET_CODE (tem) == SYMBOL_REF)
6870 tem = simplify_giv_expr (loop, tem, ext_val,
6871 benefit);
6872 if (tem)
6873 return tem;
6875 else if (GET_CODE (tem) == CONST
6876 && GET_CODE (XEXP (tem, 0)) == PLUS
6877 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6878 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6880 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6881 ext_val, benefit);
6882 if (tem)
6883 return tem;
6886 break;
6889 break;
6892 /* Fall through to general case. */
6893 default:
6894 /* If invariant, return as USE (unless CONST_INT).
6895 Otherwise, not giv. */
6896 if (GET_CODE (x) == USE)
6897 x = XEXP (x, 0);
6899 if (loop_invariant_p (loop, x) == 1)
6901 if (GET_CODE (x) == CONST_INT)
6902 return x;
6903 if (GET_CODE (x) == CONST
6904 && GET_CODE (XEXP (x, 0)) == PLUS
6905 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6906 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6907 x = XEXP (x, 0);
6908 return gen_rtx_USE (mode, x);
6910 else
6911 return 0;
6915 /* This routine folds invariants such that there is only ever one
6916 CONST_INT in the summation. It is only used by simplify_giv_expr. */
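/* For example (register names are illustrative): folding (const_int 5)
   into (plus (plus (reg A) (reg B)) (const_int 3)) yields
   (plus (plus (reg A) (reg B)) (const_int 8)), so the summation keeps a
   single CONST_INT rather than accumulating several.  */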
6918 static rtx
6919 sge_plus_constant (x, c)
6920 rtx x, c;
6922 if (GET_CODE (x) == CONST_INT)
6923 return GEN_INT (INTVAL (x) + INTVAL (c));
6924 else if (GET_CODE (x) != PLUS)
6925 return gen_rtx_PLUS (GET_MODE (x), x, c);
6926 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6928 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6929 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6931 else if (GET_CODE (XEXP (x, 0)) == PLUS
6932 || GET_CODE (XEXP (x, 1)) != PLUS)
6934 return gen_rtx_PLUS (GET_MODE (x),
6935 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6937 else
6939 return gen_rtx_PLUS (GET_MODE (x),
6940 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6944 static rtx
6945 sge_plus (mode, x, y)
6946 enum machine_mode mode;
6947 rtx x, y;
6949 while (GET_CODE (y) == PLUS)
6951 rtx a = XEXP (y, 0);
6952 if (GET_CODE (a) == CONST_INT)
6953 x = sge_plus_constant (x, a);
6954 else
6955 x = gen_rtx_PLUS (mode, x, a);
6956 y = XEXP (y, 1);
6958 if (GET_CODE (y) == CONST_INT)
6959 x = sge_plus_constant (x, y);
6960 else
6961 x = gen_rtx_PLUS (mode, x, y);
6962 return x;
6965 /* Help detect a giv that is calculated by several consecutive insns;
6966 for example,
6967 giv = biv * M
6968 giv = giv + A
6969 The caller has already identified the first insn P as having a giv as dest;
6970 we check that all other insns that set the same register follow
6971 immediately after P, that they alter nothing else,
6972 and that the result of the last is still a giv.
6974 The value is 0 if the reg set in P is not really a giv.
6975 Otherwise, the value is the amount gained by eliminating
6976 all the consecutive insns that compute the value.
6978 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6979 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6981 The coefficients of the ultimate giv value are stored in
6982 *MULT_VAL and *ADD_VAL. */
6984 static int
6985 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6986 add_val, mult_val, ext_val, last_consec_insn)
6987 const struct loop *loop;
6988 int first_benefit;
6989 rtx p;
6990 rtx src_reg;
6991 rtx dest_reg;
6992 rtx *add_val;
6993 rtx *mult_val;
6994 rtx *ext_val;
6995 rtx *last_consec_insn;
6997 struct loop_ivs *ivs = LOOP_IVS (loop);
6998 struct loop_regs *regs = LOOP_REGS (loop);
6999 int count;
7000 enum rtx_code code;
7001 int benefit;
7002 rtx temp;
7003 rtx set;
7005 /* Indicate that this is a giv so that we can update the value produced in
7006 each insn of the multi-insn sequence.
7008 This induction structure will be used only by the call to
7009 general_induction_var below, so we can allocate it on our stack.
7010 If this is a giv, our caller will replace the induct var entry with
7011 a new induction structure. */
7012 struct induction *v;
7014 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7015 return 0;
7017 v = (struct induction *) alloca (sizeof (struct induction));
7018 v->src_reg = src_reg;
7019 v->mult_val = *mult_val;
7020 v->add_val = *add_val;
7021 v->benefit = first_benefit;
7022 v->cant_derive = 0;
7023 v->derive_adjustment = 0;
7024 v->ext_dependent = NULL_RTX;
7026 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7027 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7029 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7031 while (count > 0)
7033 p = NEXT_INSN (p);
7034 code = GET_CODE (p);
7036 /* If libcall, skip to end of call sequence. */
7037 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7038 p = XEXP (temp, 0);
7040 if (code == INSN
7041 && (set = single_set (p))
7042 && GET_CODE (SET_DEST (set)) == REG
7043 && SET_DEST (set) == dest_reg
7044 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7045 add_val, mult_val, ext_val, 0,
7046 &benefit, VOIDmode)
7047 /* Giv created by equivalent expression. */
7048 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7049 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7050 add_val, mult_val, ext_val, 0,
7051 &benefit, VOIDmode)))
7052 && src_reg == v->src_reg)
7054 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7055 benefit += libcall_benefit (p);
7057 count--;
7058 v->mult_val = *mult_val;
7059 v->add_val = *add_val;
7060 v->benefit += benefit;
7062 else if (code != NOTE)
7064 /* Allow insns that set something other than this giv to a
7065 constant. Such insns are needed on machines which cannot
7066 include long constants and should not disqualify a giv. */
7067 if (code == INSN
7068 && (set = single_set (p))
7069 && SET_DEST (set) != dest_reg
7070 && CONSTANT_P (SET_SRC (set)))
7071 continue;
7073 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7074 return 0;
7078 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7079 *last_consec_insn = p;
7080 return v->benefit;
7083 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7084 represented by G1. If no such expression can be found, or it is clear that
7085 it cannot possibly be a valid address, 0 is returned.
7087 To perform the computation, we note that
7088 G1 = x * v + a and
7089 G2 = y * v + b
7090 where `v' is the biv.
7092 So G2 = (y/x) * G1 + (b - a*y/x).
7094 Note that MULT = y/x.
7096 Update: A and B are now allowed to be additive expressions such that
7097 B contains all variables in A. That is, computing B-A will not require
7098 subtracting variables. */
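/* Numeric sketch (made-up coefficients): if G1 = 2 * v + 1 and
   G2 = 6 * v + 7, then MULT = 6/2 = 3 and G2 = 3 * G1 + 4, since
   3 * (2 * v + 1) + 4 == 6 * v + 7.  */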
7100 static rtx
7101 express_from_1 (a, b, mult)
7102 rtx a, b, mult;
7104 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7106 if (mult == const0_rtx)
7107 return b;
7109 /* If MULT is not 1, we cannot handle A with non-constants, since we
7110 would then be required to subtract multiples of the registers in A.
7111 This is theoretically possible, and may even apply to some Fortran
7112 constructs, but it is a lot of work and we do not attempt it here. */
7114 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7115 return NULL_RTX;
7117 /* In general these structures are sorted top to bottom (down the PLUS
7118 chain), but not left to right across the PLUS. If B is a higher
7119 order giv than A, we can strip one level and recurse. If A is higher
7120 order, we'll eventually bail out, but won't know that until the end.
7121 If they are the same, we'll strip one level around this loop. */
7123 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7125 rtx ra, rb, oa, ob, tmp;
7127 ra = XEXP (a, 0), oa = XEXP (a, 1);
7128 if (GET_CODE (ra) == PLUS)
7129 tmp = ra, ra = oa, oa = tmp;
7131 rb = XEXP (b, 0), ob = XEXP (b, 1);
7132 if (GET_CODE (rb) == PLUS)
7133 tmp = rb, rb = ob, ob = tmp;
7135 if (rtx_equal_p (ra, rb))
7136 /* We matched: remove one reg completely. */
7137 a = oa, b = ob;
7138 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7139 /* An alternate match. */
7140 a = oa, b = rb;
7141 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7142 /* An alternate match. */
7143 a = ra, b = ob;
7144 else
7146 /* Indicates an extra register in B. Strip one level from B and
7147 recurse, hoping B was the higher order expression. */
7148 ob = express_from_1 (a, ob, mult);
7149 if (ob == NULL_RTX)
7150 return NULL_RTX;
7151 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7155 /* Here we are at the last level of A, go through the cases hoping to
7156 get rid of everything but a constant. */
7158 if (GET_CODE (a) == PLUS)
7160 rtx ra, oa;
7162 ra = XEXP (a, 0), oa = XEXP (a, 1);
7163 if (rtx_equal_p (oa, b))
7164 oa = ra;
7165 else if (!rtx_equal_p (ra, b))
7166 return NULL_RTX;
7168 if (GET_CODE (oa) != CONST_INT)
7169 return NULL_RTX;
7171 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7173 else if (GET_CODE (a) == CONST_INT)
7175 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7177 else if (CONSTANT_P (a))
7179 enum machine_mode mode_a = GET_MODE (a);
7180 enum machine_mode mode_b = GET_MODE (b);
7181 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7182 return simplify_gen_binary (MINUS, mode, b, a);
7184 else if (GET_CODE (b) == PLUS)
7186 if (rtx_equal_p (a, XEXP (b, 0)))
7187 return XEXP (b, 1);
7188 else if (rtx_equal_p (a, XEXP (b, 1)))
7189 return XEXP (b, 0);
7190 else
7191 return NULL_RTX;
7193 else if (rtx_equal_p (a, b))
7194 return const0_rtx;
7196 return NULL_RTX;
7200 express_from (g1, g2)
7201 struct induction *g1, *g2;
7203 rtx mult, add;
7205 /* The value that G1 will be multiplied by must be a constant integer. Also,
7206 the only chance we have of getting a valid address is if a*y/x (see above
7207 for notation) is also an integer. */
7208 if (GET_CODE (g1->mult_val) == CONST_INT
7209 && GET_CODE (g2->mult_val) == CONST_INT)
7211 if (g1->mult_val == const0_rtx
7212 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7213 return NULL_RTX;
7214 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7216 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7217 mult = const1_rtx;
7218 else
7220 /* ??? Find out if the one is a multiple of the other? */
7221 return NULL_RTX;
7224 add = express_from_1 (g1->add_val, g2->add_val, mult);
7225 if (add == NULL_RTX)
7227 /* Failed. If we've got a multiplication factor between G1 and G2,
7228 scale G1's addend and try again. */
7229 if (INTVAL (mult) > 1)
7231 rtx g1_add_val = g1->add_val;
7232 if (GET_CODE (g1_add_val) == MULT
7233 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7235 HOST_WIDE_INT m;
7236 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7237 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7238 XEXP (g1_add_val, 0), GEN_INT (m));
7240 else
7242 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7243 mult);
7246 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7249 if (add == NULL_RTX)
7250 return NULL_RTX;
7252 /* Form simplified final result. */
7253 if (mult == const0_rtx)
7254 return add;
7255 else if (mult == const1_rtx)
7256 mult = g1->dest_reg;
7257 else
7258 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7260 if (add == const0_rtx)
7261 return mult;
7262 else
7264 if (GET_CODE (add) == PLUS
7265 && CONSTANT_P (XEXP (add, 1)))
7267 rtx tem = XEXP (add, 1);
7268 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7269 add = tem;
7272 return gen_rtx_PLUS (g2->mode, mult, add);
7276 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7277 represented by G1. This indicates that G2 should be combined with G1 and
7278 that G2 can use (either directly or via an address expression) a register
7279 used to represent G1. */
7281 static rtx
7282 combine_givs_p (g1, g2)
7283 struct induction *g1, *g2;
7285 rtx comb, ret;
7287 /* With the introduction of ext dependent givs, we must be careful about modes.
7288 G2 must not use a wider mode than G1. */
7289 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7290 return NULL_RTX;
7292 ret = comb = express_from (g1, g2);
7293 if (comb == NULL_RTX)
7294 return NULL_RTX;
7295 if (g1->mode != g2->mode)
7296 ret = gen_lowpart (g2->mode, comb);
7298 /* If these givs are identical, they can be combined. We use the results
7299 of express_from because the addends are not in a canonical form, so
7300 rtx_equal_p is a weaker test. */
7301 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7302 combination to be the other way round. */
7303 if (comb == g1->dest_reg
7304 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7306 return ret;
7309 /* If G2 can be expressed as a function of G1 and that function is valid
7310 as an address and no more expensive than using a register for G2,
7311 the expression of G2 in terms of G1 can be used. */
7312 if (ret != NULL_RTX
7313 && g2->giv_type == DEST_ADDR
7314 && memory_address_p (GET_MODE (g2->mem), ret)
7315 /* ??? Loses, especially with -fforce-addr, where *g2->location
7316 will always be a register, and so anything more complicated
7317 gets discarded. */
7318 #if 0
7319 #ifdef ADDRESS_COST
7320 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7321 #else
7322 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7323 #endif
7324 #endif
7327 return ret;
7330 return NULL_RTX;
7333 /* Check each extension dependent giv in this class to see if its
7334 root biv is safe from wrapping in the interior mode, which would
7335 make the giv illegal. */
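/* For example (values chosen for illustration): a biv in an 8-bit mode
   that starts at 0 and is incremented by 1 for 200 iterations ends at 200.
   Zero extension is then safe (200 <= 255) but sign extension is not
   (200 > 127), so SIGN_EXTEND and TRUNCATE givs hanging off it are
   invalidated while ZERO_EXTEND givs survive.  */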
7337 static void
7338 check_ext_dependent_givs (bl, loop_info)
7339 struct iv_class *bl;
7340 struct loop_info *loop_info;
7342 int ze_ok = 0, se_ok = 0, info_ok = 0;
7343 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7344 HOST_WIDE_INT start_val;
7345 unsigned HOST_WIDE_INT u_end_val = 0;
7346 unsigned HOST_WIDE_INT u_start_val = 0;
7347 rtx incr = pc_rtx;
7348 struct induction *v;
7350 /* Make sure the iteration data is available. We must have
7351 constants in order to be certain of no overflow. */
7352 /* ??? An unknown iteration count with an increment of +-1
7353 combined with friendly exit tests against an invariant
7354 value is also amenable to optimization. Not implemented. */
7355 if (loop_info->n_iterations > 0
7356 && bl->initial_value
7357 && GET_CODE (bl->initial_value) == CONST_INT
7358 && (incr = biv_total_increment (bl))
7359 && GET_CODE (incr) == CONST_INT
7360 /* Make sure the host can represent the arithmetic. */
7361 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7363 unsigned HOST_WIDE_INT abs_incr, total_incr;
7364 HOST_WIDE_INT s_end_val;
7365 int neg_incr;
7367 info_ok = 1;
7368 start_val = INTVAL (bl->initial_value);
7369 u_start_val = start_val;
7371 neg_incr = 0, abs_incr = INTVAL (incr);
7372 if (INTVAL (incr) < 0)
7373 neg_incr = 1, abs_incr = -abs_incr;
7374 total_incr = abs_incr * loop_info->n_iterations;
7376 /* Check for host arithmetic overflow. */
7377 if (total_incr / loop_info->n_iterations == abs_incr)
7379 unsigned HOST_WIDE_INT u_max;
7380 HOST_WIDE_INT s_max;
7382 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7383 s_end_val = u_end_val;
7384 u_max = GET_MODE_MASK (biv_mode);
7385 s_max = u_max >> 1;
7387 /* Check zero extension of biv ok. */
7388 if (start_val >= 0
7389 /* Check for host arithmetic overflow. */
7390 && (neg_incr
7391 ? u_end_val < u_start_val
7392 : u_end_val > u_start_val)
7393 /* Check for target arithmetic overflow. */
7394 && (neg_incr
7395 ? 1 /* taken care of with host overflow */
7396 : u_end_val <= u_max))
7398 ze_ok = 1;
7401 /* Check sign extension of biv ok. */
7402 /* ??? While it is true that overflow with signed and pointer
7403 arithmetic is undefined, I fear too many programmers don't
7404 keep this fact in mind -- myself included on occasion.
7405 So leave the signed overflow optimizations alone. */
7406 if (start_val >= -s_max - 1
7407 /* Check for host arithmetic overflow. */
7408 && (neg_incr
7409 ? s_end_val < start_val
7410 : s_end_val > start_val)
7411 /* Check for target arithmetic overflow. */
7412 && (neg_incr
7413 ? s_end_val >= -s_max - 1
7414 : s_end_val <= s_max))
7416 se_ok = 1;
7421 /* Invalidate givs that fail the tests. */
7422 for (v = bl->giv; v; v = v->next_iv)
7423 if (v->ext_dependent)
7425 enum rtx_code code = GET_CODE (v->ext_dependent);
7426 int ok = 0;
7428 switch (code)
7430 case SIGN_EXTEND:
7431 ok = se_ok;
7432 break;
7433 case ZERO_EXTEND:
7434 ok = ze_ok;
7435 break;
7437 case TRUNCATE:
7438 /* We don't know whether this value is being used as either
7439 signed or unsigned, so to safely truncate we must satisfy
7440 both. The initial check here verifies the BIV itself;
7441 once that is successful we may check its range wrt the
7442 derived GIV. */
7443 if (se_ok && ze_ok)
7445 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7446 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7448 /* We know from the above that both endpoints are nonnegative,
7449 and that there is no wrapping. Verify that both endpoints
7450 are within the (signed) range of the outer mode. */
7451 if (u_start_val <= max && u_end_val <= max)
7452 ok = 1;
7454 break;
7456 default:
7457 abort ();
7460 if (ok)
7462 if (loop_dump_stream)
7464 fprintf (loop_dump_stream,
7465 "Verified ext dependent giv at %d of reg %d\n",
7466 INSN_UID (v->insn), bl->regno);
7469 else
7471 if (loop_dump_stream)
7473 const char *why;
7475 if (info_ok)
7476 why = "biv iteration values overflowed";
7477 else
7479 if (incr == pc_rtx)
7480 incr = biv_total_increment (bl);
7481 if (incr == const1_rtx)
7482 why = "biv iteration info incomplete; incr by 1";
7483 else
7484 why = "biv iteration info incomplete";
7487 fprintf (loop_dump_stream,
7488 "Failed ext dependent giv at %d, %s\n",
7489 INSN_UID (v->insn), why);
7491 v->ignore = 1;
7492 bl->all_reduced = 0;
7497 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7500 extend_value_for_giv (v, value)
7501 struct induction *v;
7502 rtx value;
7504 rtx ext_dep = v->ext_dependent;
7506 if (! ext_dep)
7507 return value;
7509 /* Recall that check_ext_dependent_givs verified that the known bounds
7510 of a biv did not overflow or wrap with respect to the extension for
7511 the giv. Therefore, constants need no additional adjustment. */
7512 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7513 return value;
7515 /* Otherwise, we must adjust the value to compensate for the
7516 differing modes of the biv and the giv. */
7517 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7520 struct combine_givs_stats
7522 int giv_number;
7523 int total_benefit;
7526 static int
7527 cmp_combine_givs_stats (xp, yp)
7528 const PTR xp;
7529 const PTR yp;
7531 const struct combine_givs_stats * const x =
7532 (const struct combine_givs_stats *) xp;
7533 const struct combine_givs_stats * const y =
7534 (const struct combine_givs_stats *) yp;
7535 int d;
7536 d = y->total_benefit - x->total_benefit;
7537 /* Stabilize the sort. */
7538 if (!d)
7539 d = x->giv_number - y->giv_number;
7540 return d;
7543 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7544 any other. If so, point SAME to the giv combined with and set NEW_REG to
7545 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7546 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
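/* Worked example of the scoring done below (numbers invented purely for
   illustration): with extra_benefit == 3, a giv G1 of benefit 4 that could
   absorb G2 (benefit 2) and G3 (benefit 2) is scored 4 + (2+3) + (2+3) = 14,
   so it sorts ahead of a lone giv of benefit 10 and gets to combine first.  */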
7548 static void
7549 combine_givs (regs, bl)
7550 struct loop_regs *regs;
7551 struct iv_class *bl;
7553 /* Additional benefit to add for being combined multiple times. */
7554 const int extra_benefit = 3;
7556 struct induction *g1, *g2, **giv_array;
7557 int i, j, k, giv_count;
7558 struct combine_givs_stats *stats;
7559 rtx *can_combine;
7561 /* Count givs, because bl->giv_count is incorrect here. */
7562 giv_count = 0;
7563 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7564 if (!g1->ignore)
7565 giv_count++;
7567 giv_array
7568 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7569 i = 0;
7570 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7571 if (!g1->ignore)
7572 giv_array[i++] = g1;
7574 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7575 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7577 for (i = 0; i < giv_count; i++)
7579 int this_benefit;
7580 rtx single_use;
7582 g1 = giv_array[i];
7583 stats[i].giv_number = i;
7585 /* If a DEST_REG GIV is used only once, do not allow it to combine
7586 with anything, for in doing so we will gain nothing that cannot
7587 be had by simply letting the GIV with which we would have combined
7588 be reduced on its own. The lossage shows up in particular with
7589 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7590 be seen elsewhere as well. */
7591 if (g1->giv_type == DEST_REG
7592 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7593 && single_use != const0_rtx)
7594 continue;
7596 this_benefit = g1->benefit;
7597 /* Add an additional weight for zero addends. */
7598 if (g1->no_const_addval)
7599 this_benefit += 1;
7601 for (j = 0; j < giv_count; j++)
7603 rtx this_combine;
7605 g2 = giv_array[j];
7606 if (g1 != g2
7607 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7609 can_combine[i * giv_count + j] = this_combine;
7610 this_benefit += g2->benefit + extra_benefit;
7613 stats[i].total_benefit = this_benefit;
7616 /* Iterate, combining until we can't. */
7617 restart:
7618 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7620 if (loop_dump_stream)
7622 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7623 for (k = 0; k < giv_count; k++)
7625 g1 = giv_array[stats[k].giv_number];
7626 if (!g1->combined_with && !g1->same)
7627 fprintf (loop_dump_stream, " {%d, %d}",
7628 INSN_UID (giv_array[stats[k].giv_number]->insn),
7629 stats[k].total_benefit);
7631 putc ('\n', loop_dump_stream);
7634 for (k = 0; k < giv_count; k++)
7636 int g1_add_benefit = 0;
7638 i = stats[k].giv_number;
7639 g1 = giv_array[i];
7641 /* If it has already been combined, skip. */
7642 if (g1->combined_with || g1->same)
7643 continue;
7645 for (j = 0; j < giv_count; j++)
7647 g2 = giv_array[j];
7648 if (g1 != g2 && can_combine[i * giv_count + j]
7649 /* If it has already been combined, skip. */
7650 && ! g2->same && ! g2->combined_with)
7652 int l;
7654 g2->new_reg = can_combine[i * giv_count + j];
7655 g2->same = g1;
7656 /* For the destination, we may now use an address expression instead
7657 of a plain register. This changes the costs considerably, so add the
7658 compensation. */
7659 if (g2->giv_type == DEST_ADDR)
7660 g2->benefit = (g2->benefit + reg_address_cost
7661 - address_cost (g2->new_reg,
7662 GET_MODE (g2->mem)));
7663 g1->combined_with++;
7664 g1->lifetime += g2->lifetime;
7666 g1_add_benefit += g2->benefit;
7668 /* ??? The new final_[bg]iv_value code does a much better job
7669 of finding replaceable giv's, and hence this code may no
7670 longer be necessary. */
7671 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7672 g1_add_benefit -= copy_cost;
7674 /* To help optimize the next set of combinations, remove
7675 this giv from the benefits of other potential mates. */
7676 for (l = 0; l < giv_count; ++l)
7678 int m = stats[l].giv_number;
7679 if (can_combine[m * giv_count + j])
7680 stats[l].total_benefit -= g2->benefit + extra_benefit;
7683 if (loop_dump_stream)
7684 fprintf (loop_dump_stream,
7685 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7686 INSN_UID (g2->insn), INSN_UID (g1->insn),
7687 g1->benefit, g1_add_benefit, g1->lifetime);
7691 /* To help optimize the next set of combinations, remove
7692 this giv from the benefits of other potential mates. */
7693 if (g1->combined_with)
7695 for (j = 0; j < giv_count; ++j)
7697 int m = stats[j].giv_number;
7698 if (can_combine[m * giv_count + i])
7699 stats[j].total_benefit -= g1->benefit + extra_benefit;
7702 g1->benefit += g1_add_benefit;
7704 /* We've finished with this giv, and everything it touched.
7705 Restart the combination so that proper weights for the
7706 rest of the givs are properly taken into account. */
7707 /* ??? Ideally we would compact the arrays at this point, so
7708 as to not cover old ground. But sanely compacting
7709 can_combine is tricky. */
7710 goto restart;
7714 /* Clean up. */
7715 free (stats);
7716 free (can_combine);
7719 /* Generate sequence for REG = B * M + A. */
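/* In C terms the emitted sequence computes, with unsigned arithmetic,
   reg = b * m + a; e.g. when reducing a giv 4*I + 16 whose biv's initial
   value lives in B, the new register is seeded with B*4 + 16 before the
   loop.  The concrete giv is only an illustration.  */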
7721 static rtx
7722 gen_add_mult (b, m, a, reg)
7723 rtx b; /* initial value of basic induction variable */
7724 rtx m; /* multiplicative constant */
7725 rtx a; /* additive constant */
7726 rtx reg; /* destination register */
7728 rtx seq;
7729 rtx result;
7731 start_sequence ();
7732 /* Use unsigned arithmetic. */
7733 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7734 if (reg != result)
7735 emit_move_insn (reg, result);
7736 seq = get_insns ();
7737 end_sequence ();
7739 return seq;
7743 /* Update registers created in insn sequence SEQ. */
7745 static void
7746 loop_regs_update (loop, seq)
7747 const struct loop *loop ATTRIBUTE_UNUSED;
7748 rtx seq;
7750 rtx insn;
7752 /* Update register info for alias analysis. */
7754 if (seq == NULL_RTX)
7755 return;
7757 if (INSN_P (seq))
7759 insn = seq;
7760 while (insn != NULL_RTX)
7762 rtx set = single_set (insn);
7764 if (set && GET_CODE (SET_DEST (set)) == REG)
7765 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7767 insn = NEXT_INSN (insn);
7770 else if (GET_CODE (seq) == SET
7771 && GET_CODE (SET_DEST (seq)) == REG)
7772 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7776 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7778 void
7779 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7780 const struct loop *loop;
7781 rtx b; /* initial value of basic induction variable */
7782 rtx m; /* multiplicative constant */
7783 rtx a; /* additive constant */
7784 rtx reg; /* destination register */
7785 basic_block before_bb;
7786 rtx before_insn;
7788 rtx seq;
7790 if (! before_insn)
7792 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7793 return;
7796 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7797 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7799 /* Increase the lifetime of any invariants moved further in code. */
7800 update_reg_last_use (a, before_insn);
7801 update_reg_last_use (b, before_insn);
7802 update_reg_last_use (m, before_insn);
7804 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7806 /* It is possible that the expansion created lots of new registers.
7807 Iterate over the sequence we just created and record them all. */
7808 loop_regs_update (loop, seq);
7812 /* Emit insns after the loop end to set REG = B * M + A. */
7814 void
7815 loop_iv_add_mult_sink (loop, b, m, a, reg)
7816 const struct loop *loop;
7817 rtx b; /* initial value of basic induction variable */
7818 rtx m; /* multiplicative constant */
7819 rtx a; /* additive constant */
7820 rtx reg; /* destination register */
7822 rtx seq;
7824 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7825 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7827 /* Increase the lifetime of any invariants moved further in code.
7828 ???? Is this really necessary? */
7829 update_reg_last_use (a, loop->sink);
7830 update_reg_last_use (b, loop->sink);
7831 update_reg_last_use (m, loop->sink);
7833 loop_insn_sink (loop, seq);
7835 /* It is possible that the expansion created lots of new registers.
7836 Iterate over the sequence we just created and record them all. */
7837 loop_regs_update (loop, seq);
7841 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7843 void
7844 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7845 const struct loop *loop;
7846 rtx b; /* initial value of basic induction variable */
7847 rtx m; /* multiplicative constant */
7848 rtx a; /* additive constant */
7849 rtx reg; /* destination register */
7851 rtx seq;
7853 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7854 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7856 loop_insn_hoist (loop, seq);
7858 /* It is possible that the expansion created lots of new registers.
7859 Iterate over the sequence we just created and record them all. */
7860 loop_regs_update (loop, seq);
7865 /* Similar to gen_add_mult, but compute cost rather than generating
7866 sequence. */
7868 static int
7869 iv_add_mult_cost (b, m, a, reg)
7870 rtx b; /* initial value of basic induction variable */
7871 rtx m; /* multiplicative constant */
7872 rtx a; /* additive constant */
7873 rtx reg; /* destination register */
7875 int cost = 0;
7876 rtx last, result;
7878 start_sequence ();
7879 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7880 if (reg != result)
7881 emit_move_insn (reg, result);
7882 last = get_last_insn ();
7883 while (last)
7885 rtx t = single_set (last);
7886 if (t)
7887 cost += rtx_cost (SET_SRC (t), SET);
7888 last = PREV_INSN (last);
7890 end_sequence ();
7891 return cost;
7894 /* Test whether A * B can be computed without
7895 an actual multiply insn. Value is 1 if so.
7897 ??? This function stinks because it generates a ton of wasted RTL
7898 ??? and as a result fragments GC memory to no end. There are other
7899 ??? places in the compiler which are invoked a lot and do the same
7900 ??? thing, generate wasted RTL just to see if something is possible. */
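/* Rough illustration (the outcome depends on the target's expand_mult):
   a multiply by 5 typically expands to a shift plus an add,
       t = x << 2;  t = t + x;
   two cheap insns, so the product counts as cheap, whereas a constant that
   forces a real mult insn or a libcall makes this return 0.  */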
7902 static int
7903 product_cheap_p (a, b)
7904 rtx a;
7905 rtx b;
7907 rtx tmp;
7908 int win, n_insns;
7910 /* If only one is constant, make it B. */
7911 if (GET_CODE (a) == CONST_INT)
7912 tmp = a, a = b, b = tmp;
7914 /* If first constant, both constant, so don't need multiply. */
7915 if (GET_CODE (a) == CONST_INT)
7916 return 1;
7918 /* If second not constant, neither is constant, so would need multiply. */
7919 if (GET_CODE (b) != CONST_INT)
7920 return 0;
7922 /* One operand is constant, so might not need multiply insn. Generate the
7923 code for the multiply and see if a call or multiply, or long sequence
7924 of insns is generated. */
7926 start_sequence ();
7927 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7928 tmp = get_insns ();
7929 end_sequence ();
7931 win = 1;
7932 if (INSN_P (tmp))
7934 n_insns = 0;
7935 while (tmp != NULL_RTX)
7937 rtx next = NEXT_INSN (tmp);
7939 if (++n_insns > 3
7940 || GET_CODE (tmp) != INSN
7941 || (GET_CODE (PATTERN (tmp)) == SET
7942 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7943 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7944 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7945 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7947 win = 0;
7948 break;
7951 tmp = next;
7954 else if (GET_CODE (tmp) == SET
7955 && GET_CODE (SET_SRC (tmp)) == MULT)
7956 win = 0;
7957 else if (GET_CODE (tmp) == PARALLEL
7958 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7959 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7960 win = 0;
7962 return win;
7965 /* Check to see if loop can be terminated by a "decrement and branch until
7966 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7967 Also try reversing an increment loop to a decrement loop
7968 to see if the optimization can be performed.
7969 Value is nonzero if optimization was performed. */
7971 /* This is useful even if the architecture doesn't have such an insn,
7972 because it might change a loop which increments from 0 to n to a loop
7973 which decrements from n to 0. A loop that decrements to zero is usually
7974 faster than one that increments from zero. */
7976 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7977 such as approx_final_value, biv_total_increment, loop_iterations, and
7978 final_[bg]iv_value. */
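/* Source-level picture of the transformation attempted here (illustrative
   only; the pass works on RTL):

       for (i = 0; i < n; i++)          for (i = n; i > 0; i--)
         body_not_using_i ();    ==>      body_not_using_i ();

   so the exit test becomes a compare against zero and, with the REG_NONNEG
   note, can map onto a "decrement and branch until zero" insn.  This is
   attempted only when the biv is just a counter or the other conditions
   checked below hold.  */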
7980 static int
7981 check_dbra_loop (loop, insn_count)
7982 struct loop *loop;
7983 int insn_count;
7985 struct loop_info *loop_info = LOOP_INFO (loop);
7986 struct loop_regs *regs = LOOP_REGS (loop);
7987 struct loop_ivs *ivs = LOOP_IVS (loop);
7988 struct iv_class *bl;
7989 rtx reg;
7990 rtx jump_label;
7991 rtx final_value;
7992 rtx start_value;
7993 rtx new_add_val;
7994 rtx comparison;
7995 rtx before_comparison;
7996 rtx p;
7997 rtx jump;
7998 rtx first_compare;
7999 int compare_and_branch;
8000 rtx loop_start = loop->start;
8001 rtx loop_end = loop->end;
8003 /* If last insn is a conditional branch, and the insn before tests a
8004 register value, try to optimize it. Otherwise, we can't do anything. */
8006 jump = PREV_INSN (loop_end);
8007 comparison = get_condition_for_loop (loop, jump);
8008 if (comparison == 0)
8009 return 0;
8010 if (!onlyjump_p (jump))
8011 return 0;
8013 /* Try to compute whether the compare/branch at the loop end is one or
8014 two instructions. */
8015 get_condition (jump, &first_compare);
8016 if (first_compare == jump)
8017 compare_and_branch = 1;
8018 else if (first_compare == prev_nonnote_insn (jump))
8019 compare_and_branch = 2;
8020 else
8021 return 0;
8024 /* If more than one condition is present to control the loop, then
8025 do not proceed, as this function does not know how to rewrite
8026 loop tests with more than one condition.
8028 Look backwards from the first insn in the last comparison
8029 sequence and see if we've got another comparison sequence. */
8031 rtx jump1;
8032 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8033 if (GET_CODE (jump1) == JUMP_INSN)
8034 return 0;
8037 /* Check all of the bivs to see if the compare uses one of them.
8038 Skip biv's set more than once because we can't guarantee that
8039 it will be zero on the last iteration. Also skip if the biv is
8040 used between its update and the test insn. */
8042 for (bl = ivs->list; bl; bl = bl->next)
8044 if (bl->biv_count == 1
8045 && ! bl->biv->maybe_multiple
8046 && bl->biv->dest_reg == XEXP (comparison, 0)
8047 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8048 first_compare))
8049 break;
8052 if (! bl)
8053 return 0;
8055 /* Look for the case where the basic induction variable is always
8056 nonnegative, and equals zero on the last iteration.
8057 In this case, add a reg_note REG_NONNEG, which allows the
8058 m68k DBRA instruction to be used. */
8060 if (((GET_CODE (comparison) == GT
8061 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8062 && INTVAL (XEXP (comparison, 1)) == -1)
8063 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8064 && GET_CODE (bl->biv->add_val) == CONST_INT
8065 && INTVAL (bl->biv->add_val) < 0)
8067 /* Initial value must be greater than 0, and
8068 init_val % -dec_value == 0 to ensure that it equals zero on
8069 the last iteration. */
8071 if (GET_CODE (bl->initial_value) == CONST_INT
8072 && INTVAL (bl->initial_value) > 0
8073 && (INTVAL (bl->initial_value)
8074 % (-INTVAL (bl->biv->add_val))) == 0)
8076 /* register always nonnegative, add REG_NOTE to branch */
8077 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8078 REG_NOTES (jump)
8079 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8080 REG_NOTES (jump));
8081 bl->nonneg = 1;
8083 return 1;
8086 /* If the decrement is 1 and the value was tested as >= 0 before
8087 the loop, then we can safely optimize. */
8088 for (p = loop_start; p; p = PREV_INSN (p))
8090 if (GET_CODE (p) == CODE_LABEL)
8091 break;
8092 if (GET_CODE (p) != JUMP_INSN)
8093 continue;
8095 before_comparison = get_condition_for_loop (loop, p);
8096 if (before_comparison
8097 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8098 && GET_CODE (before_comparison) == LT
8099 && XEXP (before_comparison, 1) == const0_rtx
8100 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8101 && INTVAL (bl->biv->add_val) == -1)
8103 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8104 REG_NOTES (jump)
8105 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8106 REG_NOTES (jump));
8107 bl->nonneg = 1;
8109 return 1;
8113 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8114 && INTVAL (bl->biv->add_val) > 0)
8116 /* Try to change inc to dec, so can apply above optimization. */
8117 /* Can do this if:
8118 all registers modified are induction variables or invariant,
8119 all memory references have non-overlapping addresses
8120 (obviously true if only one write)
8121 allow 2 insns for the compare/jump at the end of the loop. */
8122 /* Also, we must avoid any instructions which use both the reversed
8123 biv and another biv. Such instructions will fail if the loop is
8124 reversed. We meet this condition by requiring that either
8125 no_use_except_counting is true, or else that there is only
8126 one biv. */
8127 int num_nonfixed_reads = 0;
8128 /* 1 if the iteration var is used only to count iterations. */
8129 int no_use_except_counting = 0;
8130 /* 1 if the loop has no memory store, or it has a single memory store
8131 which is reversible. */
8132 int reversible_mem_store = 1;
8134 if (bl->giv_count == 0
8135 && !loop->exit_count
8136 && !loop_info->has_multiple_exit_targets)
8138 rtx bivreg = regno_reg_rtx[bl->regno];
8139 struct iv_class *blt;
8141 /* If there are no givs for this biv, and the only exit is the
8142 fall through at the end of the loop, then
8143 see if perhaps there are no uses except to count. */
8144 no_use_except_counting = 1;
8145 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8146 if (INSN_P (p))
8148 rtx set = single_set (p);
8150 if (set && GET_CODE (SET_DEST (set)) == REG
8151 && REGNO (SET_DEST (set)) == bl->regno)
8152 /* An insn that sets the biv is okay. */
8154 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8155 || p == prev_nonnote_insn (loop_end))
8156 && reg_mentioned_p (bivreg, PATTERN (p)))
8158 /* If either of these insns uses the biv and sets a pseudo
8159 that has more than one usage, then the biv has uses
8160 other than counting since it's used to derive a value
8161 that is used more than one time. */
8162 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8163 regs);
8164 if (regs->multiple_uses)
8166 no_use_except_counting = 0;
8167 break;
8170 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8172 no_use_except_counting = 0;
8173 break;
8177 /* A biv has uses besides counting if it is used to set
8178 another biv. */
8179 for (blt = ivs->list; blt; blt = blt->next)
8180 if (blt->init_set
8181 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8183 no_use_except_counting = 0;
8184 break;
8188 if (no_use_except_counting)
8189 /* No need to worry about MEMs. */
8191 else if (loop_info->num_mem_sets <= 1)
8193 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8194 if (INSN_P (p))
8195 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8197 /* If the loop has a single store, and the destination address is
8198 invariant, then we can't reverse the loop, because this address
8199 might then have the wrong value at loop exit.
8200 This would work if the source was invariant also, however, in that
8201 case, the insn should have been moved out of the loop. */
8203 if (loop_info->num_mem_sets == 1)
8205 struct induction *v;
8207 /* If we could prove that each of the memory locations
8208 written to was different, then we could reverse the
8209 store -- but we don't presently have any way of
8210 knowing that. */
8211 reversible_mem_store = 0;
8213 /* If the store depends on a register that is set after the
8214 store, it depends on the initial value, and is thus not
8215 reversible. */
8216 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8218 if (v->giv_type == DEST_REG
8219 && reg_mentioned_p (v->dest_reg,
8220 PATTERN (loop_info->first_loop_store_insn))
8221 && loop_insn_first_p (loop_info->first_loop_store_insn,
8222 v->insn))
8223 reversible_mem_store = 0;
8227 else
8228 return 0;
8230 /* This code only acts for innermost loops. Also it simplifies
8231 the memory address check by only reversing loops with
8232 zero or one memory access.
8233 Two memory accesses could involve parts of the same array,
8234 and that can't be reversed.
8235 If the biv is used only for counting, then we don't need to worry
8236 about all these things. */
8238 if ((num_nonfixed_reads <= 1
8239 && ! loop_info->has_nonconst_call
8240 && ! loop_info->has_prefetch
8241 && ! loop_info->has_volatile
8242 && reversible_mem_store
8243 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8244 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8245 && (bl == ivs->list && bl->next == 0))
8246 || (no_use_except_counting && ! loop_info->has_prefetch))
8248 rtx tem;
8250 /* Loop can be reversed. */
8251 if (loop_dump_stream)
8252 fprintf (loop_dump_stream, "Can reverse loop\n");
8254 /* Now check other conditions:
8256 The increment must be a constant, as must the initial value,
8257 and the comparison code must be LT.
8259 This test can probably be improved since +/- 1 in the constant
8260 can be obtained by changing LT to LE and vice versa; this is
8261 confusing. */
8263 if (comparison
8264 /* for constants, LE gets turned into LT */
8265 && (GET_CODE (comparison) == LT
8266 || (GET_CODE (comparison) == LE
8267 && no_use_except_counting)))
8269 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8270 rtx initial_value, comparison_value;
8271 int nonneg = 0;
8272 enum rtx_code cmp_code;
8273 int comparison_const_width;
8274 unsigned HOST_WIDE_INT comparison_sign_mask;
8276 add_val = INTVAL (bl->biv->add_val);
8277 comparison_value = XEXP (comparison, 1);
8278 if (GET_MODE (comparison_value) == VOIDmode)
8279 comparison_const_width
8280 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8281 else
8282 comparison_const_width
8283 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8284 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8285 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8286 comparison_sign_mask
8287 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8289 /* If the comparison value is not a loop invariant, then we
8290 can not reverse this loop.
8292 ??? If the insns which initialize the comparison value as
8293 a whole compute an invariant result, then we could move
8294 them out of the loop and proceed with loop reversal. */
8295 if (! loop_invariant_p (loop, comparison_value))
8296 return 0;
8298 if (GET_CODE (comparison_value) == CONST_INT)
8299 comparison_val = INTVAL (comparison_value);
8300 initial_value = bl->initial_value;
8302 /* Normalize the initial value if it is an integer and
8303 has no other use except as a counter. This will allow
8304 a few more loops to be reversed. */
8305 if (no_use_except_counting
8306 && GET_CODE (comparison_value) == CONST_INT
8307 && GET_CODE (initial_value) == CONST_INT)
8309 comparison_val = comparison_val - INTVAL (bl->initial_value);
8310 /* The code below requires comparison_val to be a multiple
8311 of add_val in order to do the loop reversal, so
8312 round up comparison_val to a multiple of add_val.
8313 Since comparison_value is constant, we know that the
8314 current comparison code is LT. */
8315 comparison_val = comparison_val + add_val - 1;
8316 comparison_val
8317 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8318 /* We postpone overflow checks for COMPARISON_VAL here;
8319 even if there is an overflow, we might still be able to
8320 reverse the loop, if converting the loop exit test to
8321 NE is possible. */
8322 initial_value = const0_rtx;
8325 /* First check if we can do a vanilla loop reversal. */
8326 if (initial_value == const0_rtx
8327 /* If we have a decrement_and_branch_on_count,
8328 prefer the NE test, since this will allow that
8329 instruction to be generated. Note that we must
8330 use a vanilla loop reversal if the biv is used to
8331 calculate a giv or has a non-counting use. */
8332 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8333 && defined (HAVE_decrement_and_branch_on_count)
8334 && (! (add_val == 1 && loop->vtop
8335 && (bl->biv_count == 0
8336 || no_use_except_counting)))
8337 #endif
8338 && GET_CODE (comparison_value) == CONST_INT
8339 /* Now do postponed overflow checks on COMPARISON_VAL. */
8340 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8341 & comparison_sign_mask))
8343 /* Register will always be nonnegative, with value
8344 0 on last iteration */
8345 add_adjust = add_val;
8346 nonneg = 1;
8347 cmp_code = GE;
8349 else if (add_val == 1 && loop->vtop
8350 && (bl->biv_count == 0
8351 || no_use_except_counting))
8353 add_adjust = 0;
8354 cmp_code = NE;
8356 else
8357 return 0;
8359 if (GET_CODE (comparison) == LE)
8360 add_adjust -= add_val;
8362 /* If the initial value is not zero, or if the comparison
8363 value is not an exact multiple of the increment, then we
8364 can not reverse this loop. */
8365 if (initial_value == const0_rtx
8366 && GET_CODE (comparison_value) == CONST_INT)
8368 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8369 return 0;
8371 else
8373 if (! no_use_except_counting || add_val != 1)
8374 return 0;
8377 final_value = comparison_value;
8379 /* Reset these in case we normalized the initial value
8380 and comparison value above. */
8381 if (GET_CODE (comparison_value) == CONST_INT
8382 && GET_CODE (initial_value) == CONST_INT)
8384 comparison_value = GEN_INT (comparison_val);
8385 final_value
8386 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8388 bl->initial_value = initial_value;
8390 /* Save some info needed to produce the new insns. */
8391 reg = bl->biv->dest_reg;
8392 jump_label = condjump_label (PREV_INSN (loop_end));
8393 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8395 /* Set start_value; if this is not a CONST_INT, we need
8396 to generate a SUB.
8397 Initialize biv to start_value before loop start.
8398 The old initializing insn will be deleted as a
8399 dead store by flow.c. */
8400 if (initial_value == const0_rtx
8401 && GET_CODE (comparison_value) == CONST_INT)
8403 start_value = GEN_INT (comparison_val - add_adjust);
8404 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8406 else if (GET_CODE (initial_value) == CONST_INT)
8408 enum machine_mode mode = GET_MODE (reg);
8409 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8410 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8412 if (add_insn == 0)
8413 return 0;
8415 start_value
8416 = gen_rtx_PLUS (mode, comparison_value, offset);
8417 loop_insn_hoist (loop, add_insn);
8418 if (GET_CODE (comparison) == LE)
8419 final_value = gen_rtx_PLUS (mode, comparison_value,
8420 GEN_INT (add_val));
8422 else if (! add_adjust)
8424 enum machine_mode mode = GET_MODE (reg);
8425 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8426 initial_value);
8428 if (sub_insn == 0)
8429 return 0;
8430 start_value
8431 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8432 loop_insn_hoist (loop, sub_insn);
8434 else
8435 /* We could handle the other cases too, but it'll be
8436 better to have a testcase first. */
8437 return 0;
8439 /* We may not have a single insn which can increment a reg, so
8440 create a sequence to hold all the insns from expand_inc. */
8441 start_sequence ();
8442 expand_inc (reg, new_add_val);
8443 tem = get_insns ();
8444 end_sequence ();
8446 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8447 delete_insn (bl->biv->insn);
8449 /* Update biv info to reflect its new status. */
8450 bl->biv->insn = p;
8451 bl->initial_value = start_value;
8452 bl->biv->add_val = new_add_val;
8454 /* Update loop info. */
8455 loop_info->initial_value = reg;
8456 loop_info->initial_equiv_value = reg;
8457 loop_info->final_value = const0_rtx;
8458 loop_info->final_equiv_value = const0_rtx;
8459 loop_info->comparison_value = const0_rtx;
8460 loop_info->comparison_code = cmp_code;
8461 loop_info->increment = new_add_val;
8463 /* Inc LABEL_NUSES so that delete_insn will
8464 not delete the label. */
8465 LABEL_NUSES (XEXP (jump_label, 0))++;
8467 /* Emit an insn after the end of the loop to set the biv's
8468 proper exit value if it is used anywhere outside the loop. */
8469 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8470 || ! bl->init_insn
8471 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8472 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8474 /* Delete compare/branch at end of loop. */
8475 delete_related_insns (PREV_INSN (loop_end));
8476 if (compare_and_branch == 2)
8477 delete_related_insns (first_compare);
8479 /* Add new compare/branch insn at end of loop. */
8480 start_sequence ();
8481 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8482 GET_MODE (reg), 0,
8483 XEXP (jump_label, 0));
8484 tem = get_insns ();
8485 end_sequence ();
8486 emit_jump_insn_before (tem, loop_end);
8488 for (tem = PREV_INSN (loop_end);
8489 tem && GET_CODE (tem) != JUMP_INSN;
8490 tem = PREV_INSN (tem))
8493 if (tem)
8494 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8496 if (nonneg)
8498 if (tem)
8500 /* Increment of LABEL_NUSES done above. */
8501 /* Register is now always nonnegative,
8502 so add REG_NONNEG note to the branch. */
8503 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8504 REG_NOTES (tem));
8506 bl->nonneg = 1;
8509 /* No insn may reference both the reversed and another biv or it
8510 will fail (see comment near the top of the loop reversal
8511 code).
8512 Earlier on, we have verified that the biv has no use except
8513 counting, or it is the only biv in this function.
8514 However, the code that computes no_use_except_counting does
8515 not verify reg notes. It's possible to have an insn that
8516 references another biv, and has a REG_EQUAL note with an
8517 expression based on the reversed biv. To avoid this case,
8518 remove all REG_EQUAL notes based on the reversed biv
8519 here. */
8520 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8521 if (INSN_P (p))
8523 rtx *pnote;
8524 rtx set = single_set (p);
8525 /* If this is a set of a GIV based on the reversed biv, any
8526 REG_EQUAL notes should still be correct. */
8527 if (! set
8528 || GET_CODE (SET_DEST (set)) != REG
8529 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8530 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8531 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8532 for (pnote = &REG_NOTES (p); *pnote;)
8534 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8535 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8536 XEXP (*pnote, 0)))
8537 *pnote = XEXP (*pnote, 1);
8538 else
8539 pnote = &XEXP (*pnote, 1);
8543 /* Mark that this biv has been reversed. Each giv which depends
8544 on this biv, and which is also live past the end of the loop
8545 will have to be fixed up. */
8547 bl->reversed = 1;
8549 if (loop_dump_stream)
8551 fprintf (loop_dump_stream, "Reversed loop");
8552 if (bl->nonneg)
8553 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8554 else
8555 fprintf (loop_dump_stream, "\n");
8558 return 1;
8563 return 0;
8566 /* Verify whether the biv BL appears to be eliminable,
8567 based on the insns in the loop that refer to it.
8569 If ELIMINATE_P is non-zero, actually do the elimination.
8571 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8572 determine whether invariant insns should be placed inside or at the
8573 start of the loop. */
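/* Informal example of a successful elimination (not taken from this file):
   if the biv I is used only in the exit test "I < N" and a reduced giv
   P = BASE + 4*I exists, the test can be rewritten as "P < BASE + 4*N",
   after which I and its increment are dead and can be removed.  */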
8575 static int
8576 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8577 const struct loop *loop;
8578 struct iv_class *bl;
8579 int eliminate_p;
8580 int threshold, insn_count;
8582 struct loop_ivs *ivs = LOOP_IVS (loop);
8583 rtx reg = bl->biv->dest_reg;
8584 rtx p;
8586 /* Scan all insns in the loop, stopping if we find one that uses the
8587 biv in a way that we cannot eliminate. */
8589 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8591 enum rtx_code code = GET_CODE (p);
8592 basic_block where_bb = 0;
8593 rtx where_insn = threshold >= insn_count ? 0 : p;
8595 /* If this is a libcall that sets a giv, skip ahead to its end. */
8596 if (GET_RTX_CLASS (code) == 'i')
8598 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8600 if (note)
8602 rtx last = XEXP (note, 0);
8603 rtx set = single_set (last);
8605 if (set && GET_CODE (SET_DEST (set)) == REG)
8607 unsigned int regno = REGNO (SET_DEST (set));
8609 if (regno < ivs->n_regs
8610 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8611 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8612 p = last;
8616 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8617 && reg_mentioned_p (reg, PATTERN (p))
8618 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8619 eliminate_p, where_bb, where_insn))
8621 if (loop_dump_stream)
8622 fprintf (loop_dump_stream,
8623 "Cannot eliminate biv %d: biv used in insn %d.\n",
8624 bl->regno, INSN_UID (p));
8625 break;
8629 if (p == loop->end)
8631 if (loop_dump_stream)
8632 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8633 bl->regno, eliminate_p ? "was" : "can be");
8634 return 1;
8637 return 0;
8640 /* INSN and REFERENCE are instructions in the same insn chain.
8641 Return non-zero if INSN is first. */
8644 loop_insn_first_p (insn, reference)
8645 rtx insn, reference;
8647 rtx p, q;
8649 for (p = insn, q = reference;;)
8651 /* Start with test for not first so that INSN == REFERENCE yields not
8652 first. */
8653 if (q == insn || ! p)
8654 return 0;
8655 if (p == reference || ! q)
8656 return 1;
8658 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8659 previous insn, hence the <= comparison below does not work if
8660 P is a note. */
8661 if (INSN_UID (p) < max_uid_for_loop
8662 && INSN_UID (q) < max_uid_for_loop
8663 && GET_CODE (p) != NOTE)
8664 return INSN_LUID (p) <= INSN_LUID (q);
8666 if (INSN_UID (p) >= max_uid_for_loop
8667 || GET_CODE (p) == NOTE)
8668 p = NEXT_INSN (p);
8669 if (INSN_UID (q) >= max_uid_for_loop)
8670 q = NEXT_INSN (q);
8674 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8675 the offset that we have to take into account due to auto-increment /
8676 giv derivation is zero. */
8677 static int
8678 biv_elimination_giv_has_0_offset (biv, giv, insn)
8679 struct induction *biv, *giv;
8680 rtx insn;
8682 /* If the giv V had the auto-inc address optimization applied
8683 to it, and INSN occurs between the giv insn and the biv
8684 insn, then we'd have to adjust the value used here.
8685 This is rare, so we don't bother to make this possible. */
8686 if (giv->auto_inc_opt
8687 && ((loop_insn_first_p (giv->insn, insn)
8688 && loop_insn_first_p (insn, biv->insn))
8689 || (loop_insn_first_p (biv->insn, insn)
8690 && loop_insn_first_p (insn, giv->insn))))
8691 return 0;
8693 return 1;
8696 /* If BL appears in X (part of the pattern of INSN), see if we can
8697 eliminate its use. If so, return 1. If not, return 0.
8699 If BIV does not appear in X, return 1.
8701 If ELIMINATE_P is non-zero, actually do the elimination.
8702 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8703 Depending on how many items have been moved out of the loop, it
8704 will either be before INSN (when WHERE_INSN is non-zero) or at the
8705 start of the loop (when WHERE_INSN is zero). */
8707 static int
8708 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8709 const struct loop *loop;
8710 rtx x, insn;
8711 struct iv_class *bl;
8712 int eliminate_p;
8713 basic_block where_bb;
8714 rtx where_insn;
8716 enum rtx_code code = GET_CODE (x);
8717 rtx reg = bl->biv->dest_reg;
8718 enum machine_mode mode = GET_MODE (reg);
8719 struct induction *v;
8720 rtx arg, tem;
8721 #ifdef HAVE_cc0
8722 rtx new;
8723 #endif
8724 int arg_operand;
8725 const char *fmt;
8726 int i, j;
8728 switch (code)
8730 case REG:
8731 /* If we haven't already been able to do something with this BIV,
8732 we can't eliminate it. */
8733 if (x == reg)
8734 return 0;
8735 return 1;
8737 case SET:
8738 /* If this sets the BIV, it is not a problem. */
8739 if (SET_DEST (x) == reg)
8740 return 1;
8742 /* If this is an insn that defines a giv, it is also ok because
8743 it will go away when the giv is reduced. */
8744 for (v = bl->giv; v; v = v->next_iv)
8745 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8746 return 1;
8748 #ifdef HAVE_cc0
8749 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8751 /* Can replace with any giv that was reduced and
8752 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8753 Require a constant for MULT_VAL, so we know it's nonzero.
8754 ??? We disable this optimization to avoid potential
8755 overflows. */
8757 for (v = bl->giv; v; v = v->next_iv)
8758 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8759 && v->add_val == const0_rtx
8760 && ! v->ignore && ! v->maybe_dead && v->always_computable
8761 && v->mode == mode
8762 && 0)
8764 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8765 continue;
8767 if (! eliminate_p)
8768 return 1;
8770 /* If the giv has the opposite direction of change,
8771 then reverse the comparison. */
8772 if (INTVAL (v->mult_val) < 0)
8773 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8774 const0_rtx, v->new_reg);
8775 else
8776 new = v->new_reg;
8778 /* We can probably test that giv's reduced reg. */
8779 if (validate_change (insn, &SET_SRC (x), new, 0))
8780 return 1;
8783 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8784 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8785 Require a constant for MULT_VAL, so we know it's nonzero.
8786 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8787 overflow problem. */
8789 for (v = bl->giv; v; v = v->next_iv)
8790 if (GET_CODE (v->mult_val) == CONST_INT
8791 && v->mult_val != const0_rtx
8792 && ! v->ignore && ! v->maybe_dead && v->always_computable
8793 && v->mode == mode
8794 && (GET_CODE (v->add_val) == SYMBOL_REF
8795 || GET_CODE (v->add_val) == LABEL_REF
8796 || GET_CODE (v->add_val) == CONST
8797 || (GET_CODE (v->add_val) == REG
8798 && REG_POINTER (v->add_val))))
8800 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8801 continue;
8803 if (! eliminate_p)
8804 return 1;
8806 /* If the giv has the opposite direction of change,
8807 then reverse the comparison. */
8808 if (INTVAL (v->mult_val) < 0)
8809 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8810 v->new_reg);
8811 else
8812 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8813 copy_rtx (v->add_val));
8815 /* Replace biv with the giv's reduced register. */
8816 update_reg_last_use (v->add_val, insn);
8817 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8818 return 1;
8820 /* Insn doesn't support that constant or invariant. Copy it
8821 into a register (it will be a loop invariant.) */
8822 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8824 loop_insn_emit_before (loop, 0, where_insn,
8825 gen_move_insn (tem,
8826 copy_rtx (v->add_val)));
8828 /* Substitute the new register for its invariant value in
8829 the compare expression. */
8830 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8831 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8832 return 1;
8835 #endif
8836 break;
8838 case COMPARE:
8839 case EQ: case NE:
8840 case GT: case GE: case GTU: case GEU:
8841 case LT: case LE: case LTU: case LEU:
8842 /* See if either argument is the biv. */
8843 if (XEXP (x, 0) == reg)
8844 arg = XEXP (x, 1), arg_operand = 1;
8845 else if (XEXP (x, 1) == reg)
8846 arg = XEXP (x, 0), arg_operand = 0;
8847 else
8848 break;
8850 if (CONSTANT_P (arg))
8852 /* First try to replace with any giv that has constant positive
8853 mult_val and constant add_val. We might be able to support
8854 negative mult_val, but it seems complex to do it in general. */
8856 for (v = bl->giv; v; v = v->next_iv)
8857 if (GET_CODE (v->mult_val) == CONST_INT
8858 && INTVAL (v->mult_val) > 0
8859 && (GET_CODE (v->add_val) == SYMBOL_REF
8860 || GET_CODE (v->add_val) == LABEL_REF
8861 || GET_CODE (v->add_val) == CONST
8862 || (GET_CODE (v->add_val) == REG
8863 && REG_POINTER (v->add_val)))
8864 && ! v->ignore && ! v->maybe_dead && v->always_computable
8865 && v->mode == mode)
8867 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8868 continue;
8870 /* Don't eliminate if the linear combination that makes up
8871 the giv overflows when it is applied to ARG. */
8872 if (GET_CODE (arg) == CONST_INT)
8874 rtx add_val;
8876 if (GET_CODE (v->add_val) == CONST_INT)
8877 add_val = v->add_val;
8878 else
8879 add_val = const0_rtx;
8881 if (const_mult_add_overflow_p (arg, v->mult_val,
8882 add_val, mode, 1))
8883 continue;
8886 if (! eliminate_p)
8887 return 1;
8889 /* Replace biv with the giv's reduced reg. */
8890 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8892 /* If all constants are actually constant integers and
8893 the derived constant can be directly placed in the COMPARE,
8894 do so. */
8895 if (GET_CODE (arg) == CONST_INT
8896 && GET_CODE (v->add_val) == CONST_INT)
8898 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8899 v->add_val, mode, 1);
8901 else
8903 /* Otherwise, load it into a register. */
8904 tem = gen_reg_rtx (mode);
8905 loop_iv_add_mult_emit_before (loop, arg,
8906 v->mult_val, v->add_val,
8907 tem, where_bb, where_insn);
8910 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8912 if (apply_change_group ())
8913 return 1;
8916 /* Look for giv with positive constant mult_val and nonconst add_val.
8917 Insert insns to calculate new compare value.
8918 ??? Turn this off due to possible overflow. */
8920 for (v = bl->giv; v; v = v->next_iv)
8921 if (GET_CODE (v->mult_val) == CONST_INT
8922 && INTVAL (v->mult_val) > 0
8923 && ! v->ignore && ! v->maybe_dead && v->always_computable
8924 && v->mode == mode
8925 && 0)
8927 rtx tem;
8929 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8930 continue;
8932 if (! eliminate_p)
8933 return 1;
8935 tem = gen_reg_rtx (mode);
8937 /* Replace biv with giv's reduced register. */
8938 validate_change (insn, &XEXP (x, 1 - arg_operand),
8939 v->new_reg, 1);
8941 /* Compute value to compare against. */
8942 loop_iv_add_mult_emit_before (loop, arg,
8943 v->mult_val, v->add_val,
8944 tem, where_bb, where_insn);
8945 /* Use it in this insn. */
8946 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8947 if (apply_change_group ())
8948 return 1;
8951 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8953 if (loop_invariant_p (loop, arg) == 1)
8955 /* Look for giv with constant positive mult_val and nonconst
8956 add_val. Insert insns to compute new compare value.
8957 ??? Turn this off due to possible overflow. */
8959 for (v = bl->giv; v; v = v->next_iv)
8960 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8961 && ! v->ignore && ! v->maybe_dead && v->always_computable
8962 && v->mode == mode
8963 && 0)
8965 rtx tem;
8967 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8968 continue;
8970 if (! eliminate_p)
8971 return 1;
8973 tem = gen_reg_rtx (mode);
8975 /* Replace biv with giv's reduced register. */
8976 validate_change (insn, &XEXP (x, 1 - arg_operand),
8977 v->new_reg, 1);
8979 /* Compute value to compare against. */
8980 loop_iv_add_mult_emit_before (loop, arg,
8981 v->mult_val, v->add_val,
8982 tem, where_bb, where_insn);
8983 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8984 if (apply_change_group ())
8985 return 1;
8989 /* This code has problems. Basically, when deciding whether we
8990 will eliminate BL, you can't know whether a particular giv
8991 of ARG will be reduced. If it isn't going to be reduced,
8992 we can't eliminate BL. We can try forcing it to be reduced,
8993 but that can generate poor code.
8995 The problem is that the benefit of reducing TV, below, should
8996 be increased if BL can actually be eliminated, but this means
8997 we might have to do a topological sort of the order in which
8998 we try to process biv. It doesn't seem worthwhile to do
8999 this sort of thing now. */
9001 #if 0
9002 /* Otherwise the reg compared with had better be a biv. */
9003 if (GET_CODE (arg) != REG
9004 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9005 return 0;
9007 /* Look for a pair of givs, one for each biv,
9008 with identical coefficients. */
9009 for (v = bl->giv; v; v = v->next_iv)
9011 struct induction *tv;
9013 if (v->ignore || v->maybe_dead || v->mode != mode)
9014 continue;
9016 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9017 tv = tv->next_iv)
9018 if (! tv->ignore && ! tv->maybe_dead
9019 && rtx_equal_p (tv->mult_val, v->mult_val)
9020 && rtx_equal_p (tv->add_val, v->add_val)
9021 && tv->mode == mode)
9023 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9024 continue;
9026 if (! eliminate_p)
9027 return 1;
9029 /* Replace biv with its giv's reduced reg. */
9030 XEXP (x, 1 - arg_operand) = v->new_reg;
9031 /* Replace other operand with the other giv's
9032 reduced reg. */
9033 XEXP (x, arg_operand) = tv->new_reg;
9034 return 1;
9037 #endif
9040 /* If we get here, the biv can't be eliminated. */
9041 return 0;
9043 case MEM:
9044 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9045 biv is used in it, since it will be replaced. */
9046 for (v = bl->giv; v; v = v->next_iv)
9047 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9048 return 1;
9049 break;
9051 default:
9052 break;
9055 /* See if any subexpression fails elimination. */
9056 fmt = GET_RTX_FORMAT (code);
9057 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9059 switch (fmt[i])
9061 case 'e':
9062 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9063 eliminate_p, where_bb, where_insn))
9064 return 0;
9065 break;
9067 case 'E':
9068 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9069 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9070 eliminate_p, where_bb, where_insn))
9071 return 0;
9072 break;
9076 return 1;
9079 /* Return nonzero if the last use of REG
9080 is in an insn following INSN in the same basic block. */
9082 static int
9083 last_use_this_basic_block (reg, insn)
9084 rtx reg;
9085 rtx insn;
9087 rtx n;
9088 for (n = insn;
9089 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9090 n = NEXT_INSN (n))
9092 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9093 return 1;
9095 return 0;
9098 /* Called via `note_stores' to record the initial value of a biv. Here we
9099 just record the location of the set and process it later. */
9101 static void
9102 record_initial (dest, set, data)
9103 rtx dest;
9104 rtx set;
9105 void *data ATTRIBUTE_UNUSED;
9107 struct loop_ivs *ivs = (struct loop_ivs *) data;
9108 struct iv_class *bl;
9110 if (GET_CODE (dest) != REG
9111 || REGNO (dest) >= ivs->n_regs
9112 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9113 return;
9115 bl = REG_IV_CLASS (ivs, REGNO (dest));
9117 /* If this is the first set found, record it. */
9118 if (bl->init_insn == 0)
9120 bl->init_insn = note_insn;
9121 bl->init_set = set;
9125 /* If any of the registers in X are "old" and currently have a last use earlier
9126 than INSN, update them to have a last use of INSN. Their actual last use
9127 will be the previous insn but it will not have a valid uid_luid so we can't
9128 use it. X must be a source expression only. */
9130 static void
9131 update_reg_last_use (x, insn)
9132 rtx x;
9133 rtx insn;
9135 /* Check for the case where INSN does not have a valid luid. In this case,
9136 there is no need to modify the regno_last_uid, as this can only happen
9137 when code is inserted after the loop_end to set a pseudo's final value,
9138 and hence this insn will never be the last use of x.
9139 ???? This comment is not correct. See for example loop_givs_reduce.
9140 This may insert an insn before another new insn. */
9141 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9142 && INSN_UID (insn) < max_uid_for_loop
9143 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9145 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9147 else
9149 int i, j;
9150 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9151 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9153 if (fmt[i] == 'e')
9154 update_reg_last_use (XEXP (x, i), insn);
9155 else if (fmt[i] == 'E')
9156 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9157 update_reg_last_use (XVECEXP (x, i, j), insn);
9162 /* Given an insn INSN and condition COND, return the condition in a
9163 canonical form to simplify testing by callers. Specifically:
9165 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9166 (2) Both operands will be machine operands; (cc0) will have been replaced.
9167 (3) If an operand is a constant, it will be the second operand.
9168 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9169 for GE, GEU, and LEU.
9171 If the condition cannot be understood, or is an inequality floating-point
9172 comparison which needs to be reversed, 0 will be returned.
9174 If REVERSE is non-zero, then reverse the condition prior to canonizing it.
9176 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9177 insn used in locating the condition was found. If a replacement test
9178 of the condition is desired, it should be placed in front of that
9179 insn and we will be sure that the inputs are still valid.
9181 If WANT_REG is non-zero, we wish the condition to be relative to that
9182 register, if possible. Therefore, do not canonicalize the condition
9183 further. */
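/* Examples of rules (3) and (4) above, with made-up operands:
   (LE (reg) (const_int 4)) is returned as (LT (reg) (const_int 5)), and
   (GT (const_int 0) (reg)) is swapped to (LT (reg) (const_int 0)).  */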
9186 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9187 rtx insn;
9188 rtx cond;
9189 int reverse;
9190 rtx *earliest;
9191 rtx want_reg;
9193 enum rtx_code code;
9194 rtx prev = insn;
9195 rtx set;
9196 rtx tem;
9197 rtx op0, op1;
9198 int reverse_code = 0;
9199 enum machine_mode mode;
9201 code = GET_CODE (cond);
9202 mode = GET_MODE (cond);
9203 op0 = XEXP (cond, 0);
9204 op1 = XEXP (cond, 1);
9206 if (reverse)
9207 code = reversed_comparison_code (cond, insn);
9208 if (code == UNKNOWN)
9209 return 0;
9211 if (earliest)
9212 *earliest = insn;
9214 /* If we are comparing a register with zero, see if the register is set
9215 in the previous insn to a COMPARE or a comparison operation. Perform
9216 the same tests, as a function of STORE_FLAG_VALUE, that find_comparison_args
9217 in cse.c does. */
9219 while (GET_RTX_CLASS (code) == '<'
9220 && op1 == CONST0_RTX (GET_MODE (op0))
9221 && op0 != want_reg)
9223 /* Set non-zero when we find something of interest. */
9224 rtx x = 0;
9226 #ifdef HAVE_cc0
9227 /* If comparison with cc0, import actual comparison from compare
9228 insn. */
9229 if (op0 == cc0_rtx)
9231 if ((prev = prev_nonnote_insn (prev)) == 0
9232 || GET_CODE (prev) != INSN
9233 || (set = single_set (prev)) == 0
9234 || SET_DEST (set) != cc0_rtx)
9235 return 0;
9237 op0 = SET_SRC (set);
9238 op1 = CONST0_RTX (GET_MODE (op0));
9239 if (earliest)
9240 *earliest = prev;
9242 #endif
9244 /* If this is a COMPARE, pick up the two things being compared. */
9245 if (GET_CODE (op0) == COMPARE)
9247 op1 = XEXP (op0, 1);
9248 op0 = XEXP (op0, 0);
9249 continue;
9251 else if (GET_CODE (op0) != REG)
9252 break;
9254 /* Go back to the previous insn. Stop if it is not an INSN. We also
9255 stop if it isn't a single set or if it has a REG_INC note because
9256 we don't want to bother dealing with it. */
9258 if ((prev = prev_nonnote_insn (prev)) == 0
9259 || GET_CODE (prev) != INSN
9260 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9261 break;
9263 set = set_of (op0, prev);
9265 if (set
9266 && (GET_CODE (set) != SET
9267 || !rtx_equal_p (SET_DEST (set), op0)))
9268 break;
9270 /* If this is setting OP0, get what it sets it to if it looks
9271 relevant. */
9272 if (set)
9274 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9276 /* ??? We may not combine comparisons done in a CCmode with
9277 comparisons not done in a CCmode. This is to aid targets
9278 like Alpha that have an IEEE compliant EQ instruction, and
9279 a non-IEEE compliant BEQ instruction. The use of CCmode is
9280 actually artificial, simply to prevent the combination, but
9281 should not affect other platforms.
9283 However, we must allow VOIDmode comparisons to match either
9284 CCmode or non-CCmode comparison, because some ports have
9285 modeless comparisons inside branch patterns.
9287 ??? This mode check should perhaps look more like the mode check
9288 in simplify_comparison in combine. */
9290 if ((GET_CODE (SET_SRC (set)) == COMPARE
9291 || (((code == NE
9292 || (code == LT
9293 && GET_MODE_CLASS (inner_mode) == MODE_INT
9294 && (GET_MODE_BITSIZE (inner_mode)
9295 <= HOST_BITS_PER_WIDE_INT)
9296 && (STORE_FLAG_VALUE
9297 & ((HOST_WIDE_INT) 1
9298 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9299 #ifdef FLOAT_STORE_FLAG_VALUE
9300 || (code == LT
9301 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9302 && (REAL_VALUE_NEGATIVE
9303 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9304 #endif
9306 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9307 && (((GET_MODE_CLASS (mode) == MODE_CC)
9308 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9309 || mode == VOIDmode || inner_mode == VOIDmode))
9310 x = SET_SRC (set);
9311 else if (((code == EQ
9312 || (code == GE
9313 && (GET_MODE_BITSIZE (inner_mode)
9314 <= HOST_BITS_PER_WIDE_INT)
9315 && GET_MODE_CLASS (inner_mode) == MODE_INT
9316 && (STORE_FLAG_VALUE
9317 & ((HOST_WIDE_INT) 1
9318 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9319 #ifdef FLOAT_STORE_FLAG_VALUE
9320 || (code == GE
9321 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9322 && (REAL_VALUE_NEGATIVE
9323 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9324 #endif
9326 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9327 && (((GET_MODE_CLASS (mode) == MODE_CC)
9328 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9329 || mode == VOIDmode || inner_mode == VOIDmode))
9332 reverse_code = 1;
9333 x = SET_SRC (set);
9335 else
9336 break;
9339 else if (reg_set_p (op0, prev))
9340 /* If this sets OP0, but not directly, we have to give up. */
9341 break;
9343 if (x)
9345 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9346 code = GET_CODE (x);
9347 if (reverse_code)
9349 code = reversed_comparison_code (x, prev);
9350 if (code == UNKNOWN)
9351 return 0;
9352 reverse_code = 0;
9355 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9356 if (earliest)
9357 *earliest = prev;
9361 /* If constant is first, put it last. */
9362 if (CONSTANT_P (op0))
9363 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9365 /* If OP0 is the result of a comparison, we weren't able to find what
9366 was really being compared, so fail. */
9367 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9368 return 0;
9370 /* Canonicalize any ordered comparison with integers involving equality
9371 if we can do computations in the relevant mode and we do not
9372 overflow. */
9374 if (GET_CODE (op1) == CONST_INT
9375 && GET_MODE (op0) != VOIDmode
9376 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9378 HOST_WIDE_INT const_val = INTVAL (op1);
9379 unsigned HOST_WIDE_INT uconst_val = const_val;
9380 unsigned HOST_WIDE_INT max_val
9381 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9383 switch (code)
9385 case LE:
9386 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9387 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9388 break;
9390 /* When cross-compiling, const_val might be sign-extended from
9391 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9392 case GE:
9393 if ((HOST_WIDE_INT) (const_val & max_val)
9394 != (((HOST_WIDE_INT) 1
9395 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9396 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9397 break;
9399 case LEU:
9400 if (uconst_val < max_val)
9401 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9402 break;
9404 case GEU:
9405 if (uconst_val != 0)
9406 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9407 break;
9409 default:
9410 break;
9414 #ifdef HAVE_cc0
9415 /* Never return CC0; return zero instead. */
9416 if (op0 == cc0_rtx)
9417 return 0;
9418 #endif
9420 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
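/* Illustrative, standalone sketch (not part of the compiler) of
   canonicalization rule (4) documented above: an ordered comparison
   against a constant is nudged toward the strict form whenever the
   adjusted constant cannot overflow in the operand's mode.  For a
   32-bit operand the rewrites amount to the following assertions.  */
#if 0
#include <assert.h>
#include <limits.h>

static void
canonical_form_demo_signed (int x, int c)
{
  if (c != INT_MAX)
    assert ((x <= c) == (x < c + 1));	/* LE  -> LT   */
  if (c != INT_MIN)
    assert ((x >= c) == (x > c - 1));	/* GE  -> GT   */
}

static void
canonical_form_demo_unsigned (unsigned int x, unsigned int c)
{
  if (c != UINT_MAX)
    assert ((x <= c) == (x < c + 1));	/* LEU -> LTU  */
  if (c != 0)
    assert ((x >= c) == (x > c - 1));	/* GEU -> GTU  */
}
#endif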
9423 /* Given a jump insn JUMP, return the condition that will cause it to branch
9424 to its JUMP_LABEL. If the condition cannot be understood, or is an
9425 inequality floating-point comparison which needs to be reversed, 0 will
9426 be returned.
9428 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9429 insn used in locating the condition was found. If a replacement test
9430 of the condition is desired, it should be placed in front of that
9431 insn and we will be sure that the inputs are still valid. */
9434 get_condition (jump, earliest)
9435 rtx jump;
9436 rtx *earliest;
9438 rtx cond;
9439 int reverse;
9440 rtx set;
9442 /* If this is not a standard conditional jump, we can't parse it. */
9443 if (GET_CODE (jump) != JUMP_INSN
9444 || ! any_condjump_p (jump))
9445 return 0;
9446 set = pc_set (jump);
9448 cond = XEXP (SET_SRC (set), 0);
9450 /* If this branches to JUMP_LABEL when the condition is false, reverse
9451 the condition. */
9452 reverse
9453 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9454 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9456 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
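/* Illustrative sketch of a hypothetical caller (kept out of the
   build): because of the canonical form documented above, a user of
   get_condition may assume that the result, when non-zero, is a
   comparison rtx and that a constant operand, if any, is the second
   operand.  */
#if 0
static int
jump_compares_reg_with_constant (jump)
     rtx jump;
{
  rtx cond = get_condition (jump, (rtx *) 0);

  return (cond != 0
          && GET_CODE (XEXP (cond, 0)) == REG
          && GET_CODE (XEXP (cond, 1)) == CONST_INT);
}
#endif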
9459 /* Similar to above routine, except that we also put an invariant last
9460 unless both operands are invariants. */
9463 get_condition_for_loop (loop, x)
9464 const struct loop *loop;
9465 rtx x;
9467 rtx comparison = get_condition (x, (rtx*) 0);
9469 if (comparison == 0
9470 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9471 || loop_invariant_p (loop, XEXP (comparison, 1)))
9472 return comparison;
9474 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9475 XEXP (comparison, 1), XEXP (comparison, 0));
9478 /* Scan the function and determine whether it has indirect (computed) jumps.
9480 This is taken mostly from flow.c; similar code exists elsewhere
9481 in the compiler. It may be useful to put this into rtlanal.c. */
9482 static int
9483 indirect_jump_in_function_p (start)
9484 rtx start;
9486 rtx insn;
9488 for (insn = start; insn; insn = NEXT_INSN (insn))
9489 if (computed_jump_p (insn))
9490 return 1;
9492 return 0;
9495 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9496 documentation for LOOP_MEMS for the definition of `appropriate'.
9497 This function is called from prescan_loop via for_each_rtx. */
9499 static int
9500 insert_loop_mem (mem, data)
9501 rtx *mem;
9502 void *data ATTRIBUTE_UNUSED;
9504 struct loop_info *loop_info = data;
9505 int i;
9506 rtx m = *mem;
9508 if (m == NULL_RTX)
9509 return 0;
9511 switch (GET_CODE (m))
9513 case MEM:
9514 break;
9516 case CLOBBER:
9517 /* We're not interested in MEMs that are only clobbered. */
9518 return -1;
9520 case CONST_DOUBLE:
9521 /* We're not interested in the MEM associated with a
9522 CONST_DOUBLE, so there's no need to traverse into this. */
9523 return -1;
9525 case EXPR_LIST:
9526 /* We're not interested in any MEMs that only appear in notes. */
9527 return -1;
9529 default:
9530 /* This is not a MEM. */
9531 return 0;
9534 /* See if we've already seen this MEM. */
9535 for (i = 0; i < loop_info->mems_idx; ++i)
9536 if (rtx_equal_p (m, loop_info->mems[i].mem))
9538 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9539 /* The modes of the two memory accesses are different. If
9540 this happens, something tricky is going on, and we just
9541 don't optimize accesses to this MEM. */
9542 loop_info->mems[i].optimize = 0;
9544 return 0;
9547 /* Resize the array, if necessary. */
9548 if (loop_info->mems_idx == loop_info->mems_allocated)
9550 if (loop_info->mems_allocated != 0)
9551 loop_info->mems_allocated *= 2;
9552 else
9553 loop_info->mems_allocated = 32;
9555 loop_info->mems = (loop_mem_info *)
9556 xrealloc (loop_info->mems,
9557 loop_info->mems_allocated * sizeof (loop_mem_info));
9560 /* Actually insert the MEM. */
9561 loop_info->mems[loop_info->mems_idx].mem = m;
9562 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9563 because we can't put it in a register. We still store it in the
9564 table, though, so that if we see the same address later, but in a
9565 non-BLK mode, we'll not think we can optimize it at that point. */
9566 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9567 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9568 ++loop_info->mems_idx;
9570 return 0;
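/* Illustrative, standalone sketch (hypothetical helper, not used by
   the compiler) of the growth policy applied to the LOOP_MEMS array
   above: the table starts at 32 entries and doubles whenever it
   fills, so a run of insertions costs amortized-constant
   reallocation work.  Error handling is omitted; the real code uses
   xrealloc, which aborts on failure.  */
#if 0
#include <stdlib.h>

static void *
grow_table (void *table, int *allocated, size_t elt_size)
{
  *allocated = *allocated ? *allocated * 2 : 32;
  return realloc (table, *allocated * elt_size);
}
#endif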
9574 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9576 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9577 register that is modified by an insn between FROM and TO. If the
9578 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9579 more, stop incrementing it, to avoid overflow.
9581 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9582 register I is used, if it is only used once. Otherwise, it is set
9583 to 0 (for no uses) or const0_rtx for more than one use. This
9584 parameter may be zero, in which case this processing is not done.
9586 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9587 optimize register I. */
9589 static void
9590 loop_regs_scan (loop, extra_size)
9591 const struct loop *loop;
9592 int extra_size;
9594 struct loop_regs *regs = LOOP_REGS (loop);
9595 int old_nregs;
9596 /* last_set[n] is nonzero iff reg n has been set in the current
9597 basic block. In that case, it is the insn that last set reg n. */
9598 rtx *last_set;
9599 rtx insn;
9600 int i;
9602 old_nregs = regs->num;
9603 regs->num = max_reg_num ();
9605 /* Grow the regs array if not allocated or too small. */
9606 if (regs->num >= regs->size)
9608 regs->size = regs->num + extra_size;
9610 regs->array = (struct loop_reg *)
9611 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9613 /* Zero the new elements. */
9614 memset (regs->array + old_nregs, 0,
9615 (regs->size - old_nregs) * sizeof (*regs->array));
9618 /* Clear previously scanned fields but do not clear n_times_set. */
9619 for (i = 0; i < old_nregs; i++)
9621 regs->array[i].set_in_loop = 0;
9622 regs->array[i].may_not_optimize = 0;
9623 regs->array[i].single_usage = NULL_RTX;
9626 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9628 /* Scan the loop, recording register usage. */
9629 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9630 insn = NEXT_INSN (insn))
9632 if (INSN_P (insn))
9634 /* Record registers that have exactly one use. */
9635 find_single_use_in_loop (regs, insn, PATTERN (insn));
9637 /* Include uses in REG_EQUAL notes. */
9638 if (REG_NOTES (insn))
9639 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9641 if (GET_CODE (PATTERN (insn)) == SET
9642 || GET_CODE (PATTERN (insn)) == CLOBBER)
9643 count_one_set (regs, insn, PATTERN (insn), last_set);
9644 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9646 int i;
9647 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9648 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9649 last_set);
9653 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9654 memset (last_set, 0, regs->num * sizeof (rtx));
9657 /* Invalidate all hard registers clobbered by calls. With one exception:
9658 a call-clobbered PIC register is still function-invariant for our
9659 purposes, since we can hoist any PIC calculations out of the loop.
9660 Thus the call to rtx_varies_p. */
9661 if (LOOP_INFO (loop)->has_call)
9662 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9663 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9664 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9666 regs->array[i].may_not_optimize = 1;
9667 regs->array[i].set_in_loop = 1;
9670 #ifdef AVOID_CCMODE_COPIES
9671 /* Don't try to move insns which set CC registers if we should not
9672 create CCmode register copies. */
9673 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9674 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9675 regs->array[i].may_not_optimize = 1;
9676 #endif
9678 /* Set regs->array[I].n_times_set for the new registers. */
9679 for (i = old_nregs; i < regs->num; i++)
9680 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9682 free (last_set);
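/* Illustrative, standalone sketch (not part of the compiler) of the
   saturating count kept in set_in_loop, as documented above: once the
   count reaches 127 it stops increasing, which can only make later
   decisions more conservative, never wrong.  */
#if 0
static void
saturating_increment (int *count)
{
  if (*count < 127)
    ++*count;
}
#endif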
9685 /* Returns the number of real INSNs in the LOOP. */
9687 static int
9688 count_insns_in_loop (loop)
9689 const struct loop *loop;
9691 int count = 0;
9692 rtx insn;
9694 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9695 insn = NEXT_INSN (insn))
9696 if (INSN_P (insn))
9697 ++count;
9699 return count;
9702 /* Move MEMs into registers for the duration of the loop. */
9704 static void
9705 load_mems (loop)
9706 const struct loop *loop;
9708 struct loop_info *loop_info = LOOP_INFO (loop);
9709 struct loop_regs *regs = LOOP_REGS (loop);
9710 int maybe_never = 0;
9711 int i;
9712 rtx p, prev_ebb_head;
9713 rtx label = NULL_RTX;
9714 rtx end_label;
9715 /* Nonzero if the next instruction may never be executed. */
9716 int next_maybe_never = 0;
9717 unsigned int last_max_reg = max_reg_num ();
9719 if (loop_info->mems_idx == 0)
9720 return;
9722 /* We cannot use next_label here because it skips over normal insns. */
9723 end_label = next_nonnote_insn (loop->end);
9724 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9725 end_label = NULL_RTX;
9727 /* Check to see if it's possible that some instructions in the loop are
9728 never executed. Also check if there is a goto out of the loop other
9729 than right after the end of the loop. */
9730 for (p = next_insn_in_loop (loop, loop->scan_start);
9731 p != NULL_RTX;
9732 p = next_insn_in_loop (loop, p))
9734 if (GET_CODE (p) == CODE_LABEL)
9735 maybe_never = 1;
9736 else if (GET_CODE (p) == JUMP_INSN
9737 /* If we enter the loop in the middle, and scan
9738 around to the beginning, don't set maybe_never
9739 for that. This must be an unconditional jump,
9740 otherwise the code at the top of the loop might
9741 never be executed. Unconditional jumps are
9742 followed by a barrier, then the loop end.
9743 && ! (GET_CODE (p) == JUMP_INSN
9744 && JUMP_LABEL (p) == loop->top
9745 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9746 && any_uncondjump_p (p)))
9748 /* If this is a jump outside of the loop but not right
9749 after the end of the loop, we would have to emit new fixup
9750 sequences for each such label. */
9751 if (/* If we can't tell where control might go when this
9752 JUMP_INSN is executed, we must be conservative. */
9753 !JUMP_LABEL (p)
9754 || (JUMP_LABEL (p) != end_label
9755 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9756 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9757 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9758 return;
9760 if (!any_condjump_p (p))
9761 /* Something complicated. */
9762 maybe_never = 1;
9763 else
9764 /* If there are any more instructions in the loop, they
9765 might not be reached. */
9766 next_maybe_never = 1;
9768 else if (next_maybe_never)
9769 maybe_never = 1;
9772 /* Find start of the extended basic block that enters the loop. */
9773 for (p = loop->start;
9774 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9775 p = PREV_INSN (p))
9777 prev_ebb_head = p;
9779 cselib_init ();
9781 /* Build table of mems that get set to constant values before the
9782 loop. */
9783 for (; p != loop->start; p = NEXT_INSN (p))
9784 cselib_process_insn (p);
9786 /* Actually move the MEMs. */
9787 for (i = 0; i < loop_info->mems_idx; ++i)
9789 regset_head load_copies;
9790 regset_head store_copies;
9791 int written = 0;
9792 rtx reg;
9793 rtx mem = loop_info->mems[i].mem;
9794 rtx mem_list_entry;
9796 if (MEM_VOLATILE_P (mem)
9797 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9798 /* There's no telling whether or not MEM is modified. */
9799 loop_info->mems[i].optimize = 0;
9801 /* Go through the MEMs written to in the loop to see if this
9802 one is aliased by one of them. */
9803 mem_list_entry = loop_info->store_mems;
9804 while (mem_list_entry)
9806 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9807 written = 1;
9808 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9809 mem, rtx_varies_p))
9811 /* MEM is indeed aliased by this store. */
9812 loop_info->mems[i].optimize = 0;
9813 break;
9815 mem_list_entry = XEXP (mem_list_entry, 1);
9818 if (flag_float_store && written
9819 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9820 loop_info->mems[i].optimize = 0;
9822 /* If this MEM is written to, we must be sure that there
9823 are no reads from another MEM that aliases this one. */
9824 if (loop_info->mems[i].optimize && written)
9826 int j;
9828 for (j = 0; j < loop_info->mems_idx; ++j)
9830 if (j == i)
9831 continue;
9832 else if (true_dependence (mem,
9833 VOIDmode,
9834 loop_info->mems[j].mem,
9835 rtx_varies_p))
9837 /* It's not safe to hoist loop_info->mems[i] out of
9838 the loop because writes to it might not be
9839 seen by reads from loop_info->mems[j]. */
9840 loop_info->mems[i].optimize = 0;
9841 break;
9846 if (maybe_never && may_trap_p (mem))
9847 /* We can't access the MEM outside the loop; it might
9848 cause a trap that wouldn't have happened otherwise. */
9849 loop_info->mems[i].optimize = 0;
9851 if (!loop_info->mems[i].optimize)
9852 /* We thought we were going to lift this MEM out of the
9853 loop, but later discovered that we could not. */
9854 continue;
9856 INIT_REG_SET (&load_copies);
9857 INIT_REG_SET (&store_copies);
9859 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9860 order to keep scan_loop from moving stores to this MEM
9861 out of the loop just because this REG is neither a
9862 user-variable nor used in the loop test. */
9863 reg = gen_reg_rtx (GET_MODE (mem));
9864 REG_USERVAR_P (reg) = 1;
9865 loop_info->mems[i].reg = reg;
9867 /* Now, replace all references to the MEM with the
9868 corresponding pseudos. */
9869 maybe_never = 0;
9870 for (p = next_insn_in_loop (loop, loop->scan_start);
9871 p != NULL_RTX;
9872 p = next_insn_in_loop (loop, p))
9874 if (INSN_P (p))
9876 rtx set;
9878 set = single_set (p);
9880 /* See if this copies the mem into a register that isn't
9881 modified afterwards. We'll try to do copy propagation
9882 a little further on. */
9883 if (set
9884 /* @@@ This test is _way_ too conservative. */
9885 && ! maybe_never
9886 && GET_CODE (SET_DEST (set)) == REG
9887 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9888 && REGNO (SET_DEST (set)) < last_max_reg
9889 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9890 && rtx_equal_p (SET_SRC (set), mem))
9891 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9893 /* See if this copies the mem from a register that isn't
9894 modified afterwards. We'll try to remove the
9895 redundant copy later on by doing a little register
9896 renaming and copy propagation. This will help
9897 to untangle things for the BIV detection code. */
9898 if (set
9899 && ! maybe_never
9900 && GET_CODE (SET_SRC (set)) == REG
9901 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9902 && REGNO (SET_SRC (set)) < last_max_reg
9903 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9904 && rtx_equal_p (SET_DEST (set), mem))
9905 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9907 /* If this is a call which uses / clobbers this memory
9908 location, we must not change the interface here. */
9909 if (GET_CODE (p) == CALL_INSN
9910 && reg_mentioned_p (loop_info->mems[i].mem,
9911 CALL_INSN_FUNCTION_USAGE (p)))
9913 cancel_changes (0);
9914 loop_info->mems[i].optimize = 0;
9915 break;
9917 else
9918 /* Replace the memory reference with the shadow register. */
9919 replace_loop_mems (p, loop_info->mems[i].mem,
9920 loop_info->mems[i].reg);
9923 if (GET_CODE (p) == CODE_LABEL
9924 || GET_CODE (p) == JUMP_INSN)
9925 maybe_never = 1;
9928 if (! loop_info->mems[i].optimize)
9929 ; /* We found we couldn't do the replacement, so do nothing. */
9930 else if (! apply_change_group ())
9931 /* We couldn't replace all occurrences of the MEM. */
9932 loop_info->mems[i].optimize = 0;
9933 else
9935 /* Load the memory immediately before LOOP->START, which is
9936 the NOTE_LOOP_BEG. */
9937 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9938 rtx set;
9939 rtx best = mem;
9940 int j;
9941 struct elt_loc_list *const_equiv = 0;
9943 if (e)
9945 struct elt_loc_list *equiv;
9946 struct elt_loc_list *best_equiv = 0;
9947 for (equiv = e->locs; equiv; equiv = equiv->next)
9949 if (CONSTANT_P (equiv->loc))
9950 const_equiv = equiv;
9951 else if (GET_CODE (equiv->loc) == REG
9952 /* Extending hard register lifetimes causes a crash
9953 on SRC targets. Doing so on non-SRC targets is
9954 probably also not a good idea, since we most
9955 probably have a pseudoregister equivalence as
9956 well. */
9957 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9958 best_equiv = equiv;
9960 /* Use the constant equivalence if that is cheap enough. */
9961 if (! best_equiv)
9962 best_equiv = const_equiv;
9963 else if (const_equiv
9964 && (rtx_cost (const_equiv->loc, SET)
9965 <= rtx_cost (best_equiv->loc, SET)))
9967 best_equiv = const_equiv;
9968 const_equiv = 0;
9971 /* If best_equiv is nonzero, we know that MEM is set to a
9972 constant or register before the loop. We will use this
9973 knowledge to initialize the shadow register with that
9974 constant or reg rather than by loading from MEM. */
9975 if (best_equiv)
9976 best = copy_rtx (best_equiv->loc);
9979 set = gen_move_insn (reg, best);
9980 set = loop_insn_hoist (loop, set);
9981 if (REG_P (best))
9983 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9984 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9986 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9987 break;
9991 if (const_equiv)
9992 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9994 if (written)
9996 if (label == NULL_RTX)
9998 label = gen_label_rtx ();
9999 emit_label_after (label, loop->end);
10002 /* Store the memory immediately after END, which is
10003 the NOTE_LOOP_END. */
10004 set = gen_move_insn (copy_rtx (mem), reg);
10005 loop_insn_emit_after (loop, 0, label, set);
10008 if (loop_dump_stream)
10010 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10011 REGNO (reg), (written ? "r/w" : "r/o"));
10012 print_rtl (loop_dump_stream, mem);
10013 fputc ('\n', loop_dump_stream);
10016 /* Attempt a bit of copy propagation. This helps untangle the
10017 data flow, and enables {basic,general}_induction_var to find
10018 more bivs/givs. */
10019 EXECUTE_IF_SET_IN_REG_SET
10020 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10022 try_copy_prop (loop, reg, j);
10024 CLEAR_REG_SET (&load_copies);
10026 EXECUTE_IF_SET_IN_REG_SET
10027 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10029 try_swap_copy_prop (loop, reg, j);
10031 CLEAR_REG_SET (&store_copies);
10035 if (label != NULL_RTX && end_label != NULL_RTX)
10037 /* Now, we need to replace all references to the previous exit
10038 label with the new one. */
10039 rtx_pair rr;
10040 rr.r1 = end_label;
10041 rr.r2 = label;
10043 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10045 for_each_rtx (&p, replace_label, &rr);
10047 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10048 field. This is not handled by for_each_rtx because it doesn't
10049 handle unprinted ('0') fields. We need to update JUMP_LABEL
10050 because the immediately following unroll pass will use it.
10051 replace_label would not work anyway, because that only handles
10052 LABEL_REFs. */
10053 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10054 JUMP_LABEL (p) = label;
10058 cselib_finish ();
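/* Illustrative, standalone sketch (hypothetical example, not part of
   the compiler) of the effect load_mems aims for at the source
   level: a loop-invariant, non-aliased memory location is shadowed
   by a pseudo for the duration of the loop, with a single load
   before the loop and, if the location is written, a single store
   after it.  */
#if 0
/* Before: *p is read and written on every iteration.  */
static void
accumulate_before (int *p, int n)
{
  int i;

  for (i = 0; i < n; i++)
    *p += i;
}

/* After (conceptually): the memory reference is replaced by a shadow
   register inside the loop.  */
static void
accumulate_after (int *p, int n)
{
  int shadow = *p;
  int i;

  for (i = 0; i < n; i++)
    shadow += i;
  *p = shadow;
}
#endif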
10061 /* For communication between note_reg_stored and its caller. */
10062 struct note_reg_stored_arg
10064 int set_seen;
10065 rtx reg;
10068 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10069 is equal to ARG. */
10070 static void
10071 note_reg_stored (x, setter, arg)
10072 rtx x, setter ATTRIBUTE_UNUSED;
10073 void *arg;
10075 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10076 if (t->reg == x)
10077 t->set_seen = 1;
10080 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10081 There must be exactly one insn that sets this pseudo; it will be
10082 deleted if all replacements succeed and we can prove that the register
10083 is not used after the loop. */
10085 static void
10086 try_copy_prop (loop, replacement, regno)
10087 const struct loop *loop;
10088 rtx replacement;
10089 unsigned int regno;
10091 /* This is the reg that we are copying from. */
10092 rtx reg_rtx = regno_reg_rtx[regno];
10093 rtx init_insn = 0;
10094 rtx insn;
10095 /* These help keep track of whether we replaced all uses of the reg. */
10096 int replaced_last = 0;
10097 int store_is_first = 0;
10099 for (insn = next_insn_in_loop (loop, loop->scan_start);
10100 insn != NULL_RTX;
10101 insn = next_insn_in_loop (loop, insn))
10103 rtx set;
10105 /* Only substitute within one extended basic block from the initializing
10106 insn. */
10107 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10108 break;
10110 if (! INSN_P (insn))
10111 continue;
10113 /* Is this the initializing insn? */
10114 set = single_set (insn);
10115 if (set
10116 && GET_CODE (SET_DEST (set)) == REG
10117 && REGNO (SET_DEST (set)) == regno)
10119 if (init_insn)
10120 abort ();
10122 init_insn = insn;
10123 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10124 store_is_first = 1;
10127 /* Only substitute after seeing the initializing insn. */
10128 if (init_insn && insn != init_insn)
10130 struct note_reg_stored_arg arg;
10132 replace_loop_regs (insn, reg_rtx, replacement);
10133 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10134 replaced_last = 1;
10136 /* Stop replacing when REPLACEMENT is modified. */
10137 arg.reg = replacement;
10138 arg.set_seen = 0;
10139 note_stores (PATTERN (insn), note_reg_stored, &arg);
10140 if (arg.set_seen)
10142 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10144 /* It is possible that we've turned a previously valid REG_EQUAL note
10145 into an invalid one: we changed REGNO to REPLACEMENT and, unlike REGNO,
10146 REPLACEMENT is modified here, so the note may now mean something different. */
10147 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10148 remove_note (insn, note);
10149 break;
10153 if (! init_insn)
10154 abort ();
10155 if (apply_change_group ())
10157 if (loop_dump_stream)
10158 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10159 if (store_is_first && replaced_last)
10161 rtx first;
10162 rtx retval_note;
10164 /* Assume we're just deleting INIT_INSN. */
10165 first = init_insn;
10166 /* Look for REG_RETVAL note. If we're deleting the end of
10167 the libcall sequence, the whole sequence can go. */
10168 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10169 /* If we found a REG_RETVAL note, find the first instruction
10170 in the sequence. */
10171 if (retval_note)
10172 first = XEXP (retval_note, 0);
10174 /* Delete the instructions. */
10175 loop_delete_insns (first, init_insn);
10177 if (loop_dump_stream)
10178 fprintf (loop_dump_stream, ".\n");
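/* Illustrative, standalone sketch (hypothetical example, not part of
   the compiler) of the copy propagation performed above: given a
   single insn that initializes REGNO from REPLACEMENT, every later
   use of REGNO in the extended basic block is rewritten to use
   REPLACEMENT directly, and the now-dead copy is deleted.  */
#if 0
/* Before: t exists only to mirror shadow.  */
static int
sum_before (int shadow, int n)
{
  int t = shadow;

  return t * n + t;
}

/* After: uses of t were replaced and the initializing copy deleted.  */
static int
sum_after (int shadow, int n)
{
  return shadow * n + shadow;
}
#endif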
10182 /* Replace all the instructions from FIRST up to and including LAST
10183 with NOTE_INSN_DELETED notes. */
10185 static void
10186 loop_delete_insns (first, last)
10187 rtx first;
10188 rtx last;
10190 while (1)
10192 if (loop_dump_stream)
10193 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10194 INSN_UID (first));
10195 delete_insn (first);
10197 /* If this was the LAST instruction we're supposed to delete,
10198 we're done. */
10199 if (first == last)
10200 break;
10202 first = NEXT_INSN (first);
10206 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10207 loop LOOP if the order of the sets of these registers can be
10208 swapped. There must be exactly one insn within the loop that sets
10209 this pseudo followed immediately by a move insn that sets
10210 REPLACEMENT with REGNO. */
10211 static void
10212 try_swap_copy_prop (loop, replacement, regno)
10213 const struct loop *loop;
10214 rtx replacement;
10215 unsigned int regno;
10217 rtx insn;
10218 rtx set = NULL_RTX;
10219 unsigned int new_regno;
10221 new_regno = REGNO (replacement);
10223 for (insn = next_insn_in_loop (loop, loop->scan_start);
10224 insn != NULL_RTX;
10225 insn = next_insn_in_loop (loop, insn))
10227 /* Search for the insn that copies REGNO to NEW_REGNO. */
10228 if (INSN_P (insn)
10229 && (set = single_set (insn))
10230 && GET_CODE (SET_DEST (set)) == REG
10231 && REGNO (SET_DEST (set)) == new_regno
10232 && GET_CODE (SET_SRC (set)) == REG
10233 && REGNO (SET_SRC (set)) == regno)
10234 break;
10237 if (insn != NULL_RTX)
10239 rtx prev_insn;
10240 rtx prev_set;
10242 /* Some DEF-USE info would come in handy here to make this
10243 function more general. For now, just check the previous insn
10244 which is the most likely candidate for setting REGNO. */
10246 prev_insn = PREV_INSN (insn);
10248 if (INSN_P (insn)
10249 && (prev_set = single_set (prev_insn))
10250 && GET_CODE (SET_DEST (prev_set)) == REG
10251 && REGNO (SET_DEST (prev_set)) == regno)
10253 /* We have:
10254 (set (reg regno) (expr))
10255 (set (reg new_regno) (reg regno))
10257 so try converting this to:
10258 (set (reg new_regno) (expr))
10259 (set (reg regno) (reg new_regno))
10261 The former construct is often generated when a global
10262 variable used for an induction variable is shadowed by a
10263 register (NEW_REGNO). The latter construct improves the
10264 chances of GIV replacement and BIV elimination. */
10266 validate_change (prev_insn, &SET_DEST (prev_set),
10267 replacement, 1);
10268 validate_change (insn, &SET_DEST (set),
10269 SET_SRC (set), 1);
10270 validate_change (insn, &SET_SRC (set),
10271 replacement, 1);
10273 if (apply_change_group ())
10275 if (loop_dump_stream)
10276 fprintf (loop_dump_stream,
10277 " Swapped set of reg %d at %d with reg %d at %d.\n",
10278 regno, INSN_UID (insn),
10279 new_regno, INSN_UID (prev_insn));
10281 /* Update first use of REGNO. */
10282 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10283 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10285 /* Now perform copy propagation to hopefully
10286 remove all uses of REGNO within the loop. */
10287 try_copy_prop (loop, replacement, regno);
10293 /* Replace MEM with its associated pseudo register. This function is
10294 called from load_mems via for_each_rtx. DATA is actually a pointer
10295 to a structure describing the instruction currently being scanned
10296 and the MEM we are currently replacing. */
10298 static int
10299 replace_loop_mem (mem, data)
10300 rtx *mem;
10301 void *data;
10303 loop_replace_args *args = (loop_replace_args *) data;
10304 rtx m = *mem;
10306 if (m == NULL_RTX)
10307 return 0;
10309 switch (GET_CODE (m))
10311 case MEM:
10312 break;
10314 case CONST_DOUBLE:
10315 /* We're not interested in the MEM associated with a
10316 CONST_DOUBLE, so there's no need to traverse into one. */
10317 return -1;
10319 default:
10320 /* This is not a MEM. */
10321 return 0;
10324 if (!rtx_equal_p (args->match, m))
10325 /* This is not the MEM we are currently replacing. */
10326 return 0;
10328 /* Actually replace the MEM. */
10329 validate_change (args->insn, mem, args->replacement, 1);
10331 return 0;
10334 static void
10335 replace_loop_mems (insn, mem, reg)
10336 rtx insn;
10337 rtx mem;
10338 rtx reg;
10340 loop_replace_args args;
10342 args.insn = insn;
10343 args.match = mem;
10344 args.replacement = reg;
10346 for_each_rtx (&insn, replace_loop_mem, &args);
10349 /* Replace one register with another. Called through for_each_rtx; PX points
10350 to the rtx being scanned. DATA is actually a pointer to
10351 a structure of arguments. */
10353 static int
10354 replace_loop_reg (px, data)
10355 rtx *px;
10356 void *data;
10358 rtx x = *px;
10359 loop_replace_args *args = (loop_replace_args *) data;
10361 if (x == NULL_RTX)
10362 return 0;
10364 if (x == args->match)
10365 validate_change (args->insn, px, args->replacement, 1);
10367 return 0;
10370 static void
10371 replace_loop_regs (insn, reg, replacement)
10372 rtx insn;
10373 rtx reg;
10374 rtx replacement;
10376 loop_replace_args args;
10378 args.insn = insn;
10379 args.match = reg;
10380 args.replacement = replacement;
10382 for_each_rtx (&insn, replace_loop_reg, &args);
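/* Illustrative sketch of the for_each_rtx callback protocol used by
   insert_loop_mem, replace_loop_mem and replace_loop_reg above
   (hypothetical callback, kept out of the build): return 0 to keep
   walking, -1 to skip the sub-expressions of the current rtx.  */
#if 0
static int
count_reg_uses (px, data)
     rtx *px;
     void *data;
{
  rtx x = *px;

  if (x == NULL_RTX)
    return 0;

  if (GET_CODE (x) == REG)
    ++*(int *) data;

  /* Continue walking into sub-expressions.  */
  return 0;
}
#endif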
10385 /* Replace occurrences of the old exit label for the loop with the new
10386 one. DATA is an rtx_pair containing the old and new labels,
10387 respectively. */
10389 static int
10390 replace_label (x, data)
10391 rtx *x;
10392 void *data;
10394 rtx l = *x;
10395 rtx old_label = ((rtx_pair *) data)->r1;
10396 rtx new_label = ((rtx_pair *) data)->r2;
10398 if (l == NULL_RTX)
10399 return 0;
10401 if (GET_CODE (l) != LABEL_REF)
10402 return 0;
10404 if (XEXP (l, 0) != old_label)
10405 return 0;
10407 XEXP (l, 0) = new_label;
10408 ++LABEL_NUSES (new_label);
10409 --LABEL_NUSES (old_label);
10411 return 0;
10414 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10415 (ignored in the interim). */
10417 static rtx
10418 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10419 const struct loop *loop ATTRIBUTE_UNUSED;
10420 basic_block where_bb ATTRIBUTE_UNUSED;
10421 rtx where_insn;
10422 rtx pattern;
10424 return emit_insn_after (pattern, where_insn);
10428 /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN
10429 in basic block WHERE_BB (ignored in the interim) within the loop;
10430 otherwise hoist PATTERN into the loop pre-header. */
10433 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10434 const struct loop *loop;
10435 basic_block where_bb ATTRIBUTE_UNUSED;
10436 rtx where_insn;
10437 rtx pattern;
10439 if (! where_insn)
10440 return loop_insn_hoist (loop, pattern);
10441 return emit_insn_before (pattern, where_insn);
10445 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10446 WHERE_BB (ignored in the interim) within the loop. */
10448 static rtx
10449 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10450 const struct loop *loop ATTRIBUTE_UNUSED;
10451 basic_block where_bb ATTRIBUTE_UNUSED;
10452 rtx where_insn;
10453 rtx pattern;
10455 return emit_call_insn_before (pattern, where_insn);
10459 /* Hoist insn for PATTERN into the loop pre-header. */
10462 loop_insn_hoist (loop, pattern)
10463 const struct loop *loop;
10464 rtx pattern;
10466 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10470 /* Hoist call insn for PATTERN into the loop pre-header. */
10472 static rtx
10473 loop_call_insn_hoist (loop, pattern)
10474 const struct loop *loop;
10475 rtx pattern;
10477 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10481 /* Sink insn for PATTERN after the loop end. */
10484 loop_insn_sink (loop, pattern)
10485 const struct loop *loop;
10486 rtx pattern;
10488 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10491 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10492 and a constant.  Emit a sequence of instructions to load it into REG. */
10493 static rtx
10494 gen_load_of_final_value (reg, final_value)
10495 rtx reg, final_value;
10497 rtx seq;
10498 start_sequence ();
10499 final_value = force_operand (final_value, reg);
10500 if (final_value != reg)
10501 emit_move_insn (reg, final_value);
10502 seq = get_insns ();
10503 end_sequence ();
10504 return seq;
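/* Illustrative sketch of a typical use of the helper above
   (hypothetical caller, kept out of the build): the insns are built
   in a detached sequence and then emitted after the loop in one
   step.  */
#if 0
static void
emit_final_value_after_loop (loop, reg, final_value)
     const struct loop *loop;
     rtx reg;
     rtx final_value;
{
  rtx seq = gen_load_of_final_value (reg, final_value);

  loop_insn_sink (loop, seq);
}
#endif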
10507 /* If the loop has multiple exits, emit insn for PATTERN before the
10508 loop to ensure that it will always be executed no matter how the
10509 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10510 since this is slightly more efficient. */
10512 static rtx
10513 loop_insn_sink_or_swim (loop, pattern)
10514 const struct loop *loop;
10515 rtx pattern;
10517 if (loop->exit_count)
10518 return loop_insn_hoist (loop, pattern);
10519 else
10520 return loop_insn_sink (loop, pattern);
10523 static void
10524 loop_ivs_dump (loop, file, verbose)
10525 const struct loop *loop;
10526 FILE *file;
10527 int verbose;
10529 struct iv_class *bl;
10530 int iv_num = 0;
10532 if (! loop || ! file)
10533 return;
10535 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10536 iv_num++;
10538 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10540 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10542 loop_iv_class_dump (bl, file, verbose);
10543 fputc ('\n', file);
10548 static void
10549 loop_iv_class_dump (bl, file, verbose)
10550 const struct iv_class *bl;
10551 FILE *file;
10552 int verbose ATTRIBUTE_UNUSED;
10554 struct induction *v;
10555 rtx incr;
10556 int i;
10558 if (! bl || ! file)
10559 return;
10561 fprintf (file, "IV class for reg %d, benefit %d\n",
10562 bl->regno, bl->total_benefit);
10564 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10565 if (bl->initial_value)
10567 fprintf (file, ", init val: ");
10568 print_simple_rtl (file, bl->initial_value);
10570 if (bl->initial_test)
10572 fprintf (file, ", init test: ");
10573 print_simple_rtl (file, bl->initial_test);
10575 fputc ('\n', file);
10577 if (bl->final_value)
10579 fprintf (file, " Final val: ");
10580 print_simple_rtl (file, bl->final_value);
10581 fputc ('\n', file);
10584 if ((incr = biv_total_increment (bl)))
10586 fprintf (file, " Total increment: ");
10587 print_simple_rtl (file, incr);
10588 fputc ('\n', file);
10591 /* List the increments. */
10592 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10594 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10595 print_simple_rtl (file, v->add_val);
10596 fputc ('\n', file);
10599 /* List the givs. */
10600 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10602 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10603 i, INSN_UID (v->insn), v->benefit);
10604 if (v->giv_type == DEST_ADDR)
10605 print_simple_rtl (file, v->mem);
10606 else
10607 print_simple_rtl (file, single_set (v->insn));
10608 fputc ('\n', file);
10613 static void
10614 loop_biv_dump (v, file, verbose)
10615 const struct induction *v;
10616 FILE *file;
10617 int verbose;
10619 if (! v || ! file)
10620 return;
10622 fprintf (file,
10623 "Biv %d: insn %d",
10624 REGNO (v->dest_reg), INSN_UID (v->insn));
10625 fprintf (file, " const ");
10626 print_simple_rtl (file, v->add_val);
10628 if (verbose && v->final_value)
10630 fputc ('\n', file);
10631 fprintf (file, " final ");
10632 print_simple_rtl (file, v->final_value);
10635 fputc ('\n', file);
10639 static void
10640 loop_giv_dump (v, file, verbose)
10641 const struct induction *v;
10642 FILE *file;
10643 int verbose;
10645 if (! v || ! file)
10646 return;
10648 if (v->giv_type == DEST_REG)
10649 fprintf (file, "Giv %d: insn %d",
10650 REGNO (v->dest_reg), INSN_UID (v->insn));
10651 else
10652 fprintf (file, "Dest address: insn %d",
10653 INSN_UID (v->insn));
10655 fprintf (file, " src reg %d benefit %d",
10656 REGNO (v->src_reg), v->benefit);
10657 fprintf (file, " lifetime %d",
10658 v->lifetime);
10660 if (v->replaceable)
10661 fprintf (file, " replaceable");
10663 if (v->no_const_addval)
10664 fprintf (file, " ncav");
10666 if (v->ext_dependent)
10668 switch (GET_CODE (v->ext_dependent))
10670 case SIGN_EXTEND:
10671 fprintf (file, " ext se");
10672 break;
10673 case ZERO_EXTEND:
10674 fprintf (file, " ext ze");
10675 break;
10676 case TRUNCATE:
10677 fprintf (file, " ext tr");
10678 break;
10679 default:
10680 abort ();
10684 fputc ('\n', file);
10685 fprintf (file, " mult ");
10686 print_simple_rtl (file, v->mult_val);
10688 fputc ('\n', file);
10689 fprintf (file, " add ");
10690 print_simple_rtl (file, v->add_val);
10692 if (verbose && v->final_value)
10694 fputc ('\n', file);
10695 fprintf (file, " final ");
10696 print_simple_rtl (file, v->final_value);
10699 fputc ('\n', file);
10703 void
10704 debug_ivs (loop)
10705 const struct loop *loop;
10707 loop_ivs_dump (loop, stderr, 1);
10711 void
10712 debug_iv_class (bl)
10713 const struct iv_class *bl;
10715 loop_iv_class_dump (bl, stderr, 1);
10719 void
10720 debug_biv (v)
10721 const struct induction *v;
10723 loop_biv_dump (v, stderr, 1);
10727 void
10728 debug_giv (v)
10729 const struct induction *v;
10731 loop_giv_dump (v, stderr, 1);
10735 #define LOOP_BLOCK_NUM_1(INSN) \
10736 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10738 /* The notes do not have an assigned block, so look at the next insn. */
10739 #define LOOP_BLOCK_NUM(INSN) \
10740 ((INSN) ? (GET_CODE (INSN) == NOTE \
10741 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10742 : LOOP_BLOCK_NUM_1 (INSN)) \
10743 : -1)
10745 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10747 static void
10748 loop_dump_aux (loop, file, verbose)
10749 const struct loop *loop;
10750 FILE *file;
10751 int verbose ATTRIBUTE_UNUSED;
10753 rtx label;
10755 if (! loop || ! file)
10756 return;
10758 /* Print diagnostics to compare our concept of a loop with
10759 what the loop notes say. */
10760 if (! PREV_INSN (loop->first->head)
10761 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10762 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10763 != NOTE_INSN_LOOP_BEG)
10764 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10765 INSN_UID (PREV_INSN (loop->first->head)));
10766 if (! NEXT_INSN (loop->last->end)
10767 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10768 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10769 != NOTE_INSN_LOOP_END)
10770 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10771 INSN_UID (NEXT_INSN (loop->last->end)));
10773 if (loop->start)
10775 fprintf (file,
10776 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10777 LOOP_BLOCK_NUM (loop->start),
10778 LOOP_INSN_UID (loop->start),
10779 LOOP_BLOCK_NUM (loop->cont),
10780 LOOP_INSN_UID (loop->cont),
10781 LOOP_BLOCK_NUM (loop->cont),
10782 LOOP_INSN_UID (loop->cont),
10783 LOOP_BLOCK_NUM (loop->vtop),
10784 LOOP_INSN_UID (loop->vtop),
10785 LOOP_BLOCK_NUM (loop->end),
10786 LOOP_INSN_UID (loop->end));
10787 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10788 LOOP_BLOCK_NUM (loop->top),
10789 LOOP_INSN_UID (loop->top),
10790 LOOP_BLOCK_NUM (loop->scan_start),
10791 LOOP_INSN_UID (loop->scan_start));
10792 fprintf (file, ";; exit_count %d", loop->exit_count);
10793 if (loop->exit_count)
10795 fputs (", labels:", file);
10796 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10798 fprintf (file, " %d ",
10799 LOOP_INSN_UID (XEXP (label, 0)));
10802 fputs ("\n", file);
10804 /* This can happen when a marked loop appears as two nested loops,
10805 say from while (a || b) {}. The inner loop won't match
10806 the loop markers but the outer one will. */
10807 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10808 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10812 /* Call this function from the debugger to dump LOOP. */
10814 void
10815 debug_loop (loop)
10816 const struct loop *loop;
10818 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10821 /* Call this function from the debugger to dump LOOPS. */
10823 void
10824 debug_loops (loops)
10825 const struct loops *loops;
10827 flow_loops_dump (loops, stderr, loop_dump_aux, 1);