gcc/loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value,
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things. */
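/* For example (an illustrative sketch, not code from this pass), given

       for (i = 0; i < n; i++)
         a[i] = x * y;

   the invariant product x * y is hoisted before the loop, and the giv
   &a[i], which is linear in the biv i, is strength reduced to a pointer
   bumped on each iteration:

       t = x * y;
       for (i = 0, p = &a[0]; i < n; i++, p++)
         *p = t;

   If i then has no remaining uses, induction variable elimination can
   rewrite the exit test in terms of p and delete i entirely.  */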
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimal number of prefetch blocks that a loop must consume to make
83 the emitting of prefetch instructions in the body of the loop worthwhile. */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
86 /* Parameterize some prefetch heuristics so they can be turned on and off
87 easily for performance testing on new architectures. These can be
88 defined in target-dependent files. */
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
95 /* Define what we mean by "dense" loads and stores; this value divided by 256
96 is the minimum fraction of memory references that are worth prefetching. */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
127 /* Define a limit to how far apart indices can be and still be merged
128 into a single prefetch. */
129 #ifndef PREFETCH_EXTREME_DIFFERENCE
130 #define PREFETCH_EXTREME_DIFFERENCE 4096
131 #endif
133 /* Issue prefetch instructions before the loop to fetch data to be used
134 in the first few loop iterations. */
135 #ifndef PREFETCH_BEFORE_LOOP
136 #define PREFETCH_BEFORE_LOOP 1
137 #endif
139 /* Do not handle reversed order prefetches (negative stride). */
140 #ifndef PREFETCH_NO_REVERSE_ORDER
141 #define PREFETCH_NO_REVERSE_ORDER 1
142 #endif
144 /* Prefetch even if the GIV is in conditional code. */
145 #ifndef PREFETCH_CONDITIONAL
146 #define PREFETCH_CONDITIONAL 1
147 #endif
149 /* If the loop requires more prefetches than the target can process in
150 parallel then don't prefetch anything in that loop. */
151 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
152 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
153 #endif
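/* A worked example with the defaults above: the density threshold is
   PREFETCH_DENSE_MEM / 256 = 220 / 256, roughly 86%, and in-loop
   prefetching requires the loop to consume at least
   PREFETCH_BLOCKS_IN_LOOP_MIN * PREFETCH_BLOCK = 6 * 32 = 192 bytes;
   a loop known to iterate at most PREFETCH_LOW_LOOPCNT (32) times, or
   one containing a call, gets no prefetches at all.  */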
155 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
156 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
158 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
159 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
160 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
162 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
163 ((REGNO) < FIRST_PSEUDO_REGISTER \
164 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
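/* For example, on a 32-bit target a DImode SET_DEST in a hard register
   spans HARD_REGNO_NREGS == 2 machine registers, whereas a pseudo
   always counts as a single register here.  */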
167 /* Vector mapping INSN_UIDs to luids.
168 The luids are like uids but always increase monotonically.
169 We use them to see whether a jump comes from outside a given loop. */
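/* For example, if insns appear in the stream with uids 7, 3 and 12
   (uids reflect creation order, not position), they receive luids
   1, 2 and 3, so comparing luids always follows textual order.  */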
171 int *uid_luid;
173 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
174 number the insn is contained in. */
176 struct loop **uid_loop;
178 /* 1 + largest uid of any insn. */
180 int max_uid_for_loop;
182 /* 1 + luid of last insn. */
184 static int max_luid;
186 /* Number of loops detected in current function. Used as an index into the
187 next few tables. */
189 static int max_loop_num;
191 /* Bound on pseudo register number before loop optimization.
192 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
193 unsigned int max_reg_before_loop;
195 /* The value to pass to the next call of reg_scan_update. */
196 static int loop_max_reg;
198 #define obstack_chunk_alloc xmalloc
199 #define obstack_chunk_free free
201 /* During the analysis of a loop, a chain of `struct movable's
202 is made to record all the movable insns found.
203 Then the entire chain can be scanned to decide which to move. */
205 struct movable
207 rtx insn; /* A movable insn */
208 rtx set_src; /* The expression this reg is set from. */
209 rtx set_dest; /* The destination of this SET. */
210 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
211 of any registers used within the LIBCALL. */
212 int consec; /* Number of consecutive following insns
213 that must be moved with this one. */
214 unsigned int regno; /* The register it sets */
215 short lifetime; /* lifetime of that register;
216 may be adjusted when matching movables
217 that load the same value are found. */
218 short savings; /* Number of insns we can move for this reg,
219 including other movables that force this
220 or match this one. */
221 unsigned int cond : 1; /* 1 if only conditionally movable */
222 unsigned int force : 1; /* 1 means MUST move this insn */
223 unsigned int global : 1; /* 1 means reg is live outside this loop */
224 /* If PARTIAL is 1, GLOBAL means something different:
225 that the reg is live outside the range from where it is set
226 to the following label. */
227 unsigned int done : 1; /* 1 inhibits further processing of this */
229 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
230 In particular, moving it does not make it
231 invariant. */
232 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
233 load SRC, rather than copying INSN. */
234 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
235 first insn of a consecutive sets group. */
236 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
237 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
238 that we should avoid changing when clearing
239 the rest of the reg. */
240 struct movable *match; /* First entry for same value */
241 struct movable *forces; /* An insn that must be moved if this is */
242 struct movable *next;
246 FILE *loop_dump_stream;
248 /* Forward declarations. */
250 static void invalidate_loops_containing_label PARAMS ((rtx));
251 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
252 static void mark_loop_jump PARAMS ((rtx, struct loop *));
253 static void prescan_loop PARAMS ((struct loop *));
254 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
255 static int consec_sets_invariant_p PARAMS ((const struct loop *,
256 rtx, int, rtx));
257 static int labels_in_range_p PARAMS ((rtx, int));
258 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
259 static void note_addr_stored PARAMS ((rtx, rtx, void *));
260 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
261 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
262 static void scan_loop PARAMS ((struct loop*, int));
263 #if 0
264 static void replace_call_address PARAMS ((rtx, rtx, rtx));
265 #endif
266 static rtx skip_consec_insns PARAMS ((rtx, int));
267 static int libcall_benefit PARAMS ((rtx));
268 static void ignore_some_movables PARAMS ((struct loop_movables *));
269 static void force_movables PARAMS ((struct loop_movables *));
270 static void combine_movables PARAMS ((struct loop_movables *,
271 struct loop_regs *));
272 static int num_unmoved_movables PARAMS ((const struct loop *));
273 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
274 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
275 struct loop_regs *));
276 static void add_label_notes PARAMS ((rtx, rtx));
277 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
278 int, int));
279 static void loop_movables_add PARAMS((struct loop_movables *,
280 struct movable *));
281 static void loop_movables_free PARAMS((struct loop_movables *));
282 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
283 static void loop_bivs_find PARAMS((struct loop *));
284 static void loop_bivs_init_find PARAMS((struct loop *));
285 static void loop_bivs_check PARAMS((struct loop *));
286 static void loop_givs_find PARAMS((struct loop *));
287 static void loop_givs_check PARAMS((struct loop *));
288 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
289 int, int));
290 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
291 struct induction *, rtx));
292 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
293 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
294 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
295 rtx *));
296 static void loop_ivs_free PARAMS((struct loop *));
297 static void strength_reduce PARAMS ((struct loop *, int));
298 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
299 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
300 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
301 static void record_biv PARAMS ((struct loop *, struct induction *,
302 rtx, rtx, rtx, rtx, rtx *,
303 int, int));
304 static void check_final_value PARAMS ((const struct loop *,
305 struct induction *));
306 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
307 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
308 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
309 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
310 static void record_giv PARAMS ((const struct loop *, struct induction *,
311 rtx, rtx, rtx, rtx, rtx, rtx, int,
312 enum g_types, int, int, rtx *));
313 static void update_giv_derive PARAMS ((const struct loop *, rtx));
314 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
315 struct loop_info *));
316 static int basic_induction_var PARAMS ((const struct loop *, rtx,
317 enum machine_mode, rtx, rtx,
318 rtx *, rtx *, rtx **));
319 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
320 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
321 rtx *, rtx *, rtx *, int, int *,
322 enum machine_mode));
323 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
324 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
325 static int check_dbra_loop PARAMS ((struct loop *, int));
326 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
327 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
328 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
329 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
330 static int product_cheap_p PARAMS ((rtx, rtx));
331 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
332 int, int, int));
333 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
334 struct iv_class *, int,
335 basic_block, rtx));
336 static int last_use_this_basic_block PARAMS ((rtx, rtx));
337 static void record_initial PARAMS ((rtx, rtx, void *));
338 static void update_reg_last_use PARAMS ((rtx, rtx));
339 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
340 static void loop_regs_scan PARAMS ((const struct loop *, int));
341 static int count_insns_in_loop PARAMS ((const struct loop *));
342 static void load_mems PARAMS ((const struct loop *));
343 static int insert_loop_mem PARAMS ((rtx *, void *));
344 static int replace_loop_mem PARAMS ((rtx *, void *));
345 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
346 static int replace_loop_reg PARAMS ((rtx *, void *));
347 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
348 static void note_reg_stored PARAMS ((rtx, rtx, void *));
349 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
350 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
351 unsigned int));
352 static int replace_label PARAMS ((rtx *, void *));
353 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
354 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
355 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
356 static void loop_regs_update PARAMS ((const struct loop *, rtx));
357 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
359 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
360 rtx, rtx));
361 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
362 basic_block, rtx, rtx));
363 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
364 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
366 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
367 static void loop_delete_insns PARAMS ((rtx, rtx));
368 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
369 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
370 void debug_ivs PARAMS ((const struct loop *));
371 void debug_iv_class PARAMS ((const struct iv_class *));
372 void debug_biv PARAMS ((const struct induction *));
373 void debug_giv PARAMS ((const struct induction *));
374 void debug_loop PARAMS ((const struct loop *));
375 void debug_loops PARAMS ((const struct loops *));
377 typedef struct rtx_pair
379 rtx r1;
380 rtx r2;
381 } rtx_pair;
383 typedef struct loop_replace_args
385 rtx match;
386 rtx replacement;
387 rtx insn;
388 } loop_replace_args;
390 /* Nonzero iff INSN is between START and END, inclusive. */
391 #define INSN_IN_RANGE_P(INSN, START, END) \
392 (INSN_UID (INSN) < max_uid_for_loop \
393 && INSN_LUID (INSN) >= INSN_LUID (START) \
394 && INSN_LUID (INSN) <= INSN_LUID (END))
396 /* Indirect_jump_in_function is computed once per function. */
397 static int indirect_jump_in_function;
398 static int indirect_jump_in_function_p PARAMS ((rtx));
400 static int compute_luids PARAMS ((rtx, rtx, int));
402 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
403 struct induction *,
404 rtx));
406 /* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
407 copy the value of the strength-reduced giv to its original register. */
408 static int copy_cost;
410 /* Cost of using a register, to normalize the benefits of a giv. */
411 static int reg_address_cost;
413 void
414 init_loop ()
416 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
418 reg_address_cost = address_cost (reg, SImode);
420 copy_cost = COSTS_N_INSNS (1);
423 /* Compute the mapping from uids to luids.
424 LUIDs are numbers assigned to insns, like uids,
425 except that luids increase monotonically through the code.
426 Start at insn START and stop just before END. Assign LUIDs
427 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
428 static int
429 compute_luids (start, end, prev_luid)
430 rtx start, end;
431 int prev_luid;
433 int i;
434 rtx insn;
436 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
438 if (INSN_UID (insn) >= max_uid_for_loop)
439 continue;
440 /* Don't assign luids to line-number NOTEs, so that the distance in
441 luids between two insns is not affected by -g. */
442 if (GET_CODE (insn) != NOTE
443 || NOTE_LINE_NUMBER (insn) <= 0)
444 uid_luid[INSN_UID (insn)] = ++i;
445 else
446 /* Give a line number note the same luid as preceding insn. */
447 uid_luid[INSN_UID (insn)] = i;
449 return i + 1;
452 /* Entry point of this file. Perform loop optimization
453 on the current function. F is the first insn of the function
454 and DUMPFILE is a stream for output of a trace of actions taken
455 (or 0 if none should be output). */
457 void
458 loop_optimize (f, dumpfile, flags)
459 /* f is the first instruction of a chain of insns for one function */
460 rtx f;
461 FILE *dumpfile;
462 int flags;
464 rtx insn;
465 int i;
466 struct loops loops_data;
467 struct loops *loops = &loops_data;
468 struct loop_info *loops_info;
470 loop_dump_stream = dumpfile;
472 init_recog_no_volatile ();
474 max_reg_before_loop = max_reg_num ();
475 loop_max_reg = max_reg_before_loop;
477 regs_may_share = 0;
479 /* Count the number of loops. */
481 max_loop_num = 0;
482 for (insn = f; insn; insn = NEXT_INSN (insn))
484 if (GET_CODE (insn) == NOTE
485 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
486 max_loop_num++;
489 /* Don't waste time if no loops. */
490 if (max_loop_num == 0)
491 return;
493 loops->num = max_loop_num;
495 /* Get size to use for tables indexed by uids.
496 Leave some space for labels allocated by find_and_verify_loops. */
497 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
499 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
500 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
501 sizeof (struct loop *));
503 /* Allocate storage for array of loops. */
504 loops->array = (struct loop *)
505 xcalloc (loops->num, sizeof (struct loop));
507 /* Find and process each loop.
508 First, find them, and record them in order of their beginnings. */
509 find_and_verify_loops (f, loops);
511 /* Allocate and initialize auxiliary loop information. */
512 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
513 for (i = 0; i < loops->num; i++)
514 loops->array[i].aux = loops_info + i;
516 /* Now find all register lifetimes. This must be done after
517 find_and_verify_loops, because it might reorder the insns in the
518 function. */
519 reg_scan (f, max_reg_before_loop, 1);
521 /* This must occur after reg_scan so that registers created by gcse
522 will have entries in the register tables.
524 We could have added a call to reg_scan after gcse_main in toplev.c,
525 but moving this call to init_alias_analysis is more efficient. */
526 init_alias_analysis ();
528 /* See if we went too far. Note that get_max_uid already returns
529 one more than the maximum uid of all insns. */
530 if (get_max_uid () > max_uid_for_loop)
531 abort ();
532 /* Now reset it to the actual size we need. See above. */
533 max_uid_for_loop = get_max_uid ();
535 /* find_and_verify_loops has already called compute_luids, but it
536 might have rearranged code afterwards, so we need to recompute
537 the luids now. */
538 max_luid = compute_luids (f, NULL_RTX, 0);
540 /* Don't leave gaps in uid_luid for insns that have been
541 deleted. It is possible that the first or last insn
542 using some register has been deleted by cross-jumping.
543 Make sure that uid_luid for that former insn's uid
544 points to the general area where that insn used to be. */
545 for (i = 0; i < max_uid_for_loop; i++)
547 uid_luid[0] = uid_luid[i];
548 if (uid_luid[0] != 0)
549 break;
551 for (i = 0; i < max_uid_for_loop; i++)
552 if (uid_luid[i] == 0)
553 uid_luid[i] = uid_luid[i - 1];
555 /* Determine if the function has an indirect jump. On some systems
556 this prevents low overhead loop instructions from being used. */
557 indirect_jump_in_function = indirect_jump_in_function_p (f);
559 /* Now scan the loops, last ones first, since this means inner ones are done
560 before outer ones. */
561 for (i = max_loop_num - 1; i >= 0; i--)
563 struct loop *loop = &loops->array[i];
565 if (! loop->invalid && loop->end)
566 scan_loop (loop, flags);
569 end_alias_analysis ();
571 /* Clean up. */
572 free (uid_luid);
573 free (uid_loop);
574 free (loops_info);
575 free (loops->array);
578 /* Returns the next insn, in execution order, after INSN. START and
579 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
580 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
581 insn-stream; it is used with loops that are entered near the
582 bottom. */
584 static rtx
585 next_insn_in_loop (loop, insn)
586 const struct loop *loop;
587 rtx insn;
589 insn = NEXT_INSN (insn);
591 if (insn == loop->end)
593 if (loop->top)
594 /* Go to the top of the loop, and continue there. */
595 insn = loop->top;
596 else
597 /* We're done. */
598 insn = NULL_RTX;
601 if (insn == loop->scan_start)
602 /* We're done. */
603 insn = NULL_RTX;
605 return insn;
608 /* Optimize one loop described by LOOP. */
610 /* ??? Could also move memory writes out of loops if the destination address
611 is invariant, the source is invariant, the memory write is not volatile,
612 and if we can prove that no read inside the loop can read this address
613 before the write occurs. If there is a read of this address after the
614 write, then we can also mark the memory read as invariant. */
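/* An illustrative case for the ??? note above: in
   `while (--n) *p = c;', with p and c invariant and *p never read
   inside the loop, the store could be performed once after the loop,
   provided the loop is known to execute at least once and the store
   cannot trap.  */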
616 static void
617 scan_loop (loop, flags)
618 struct loop *loop;
619 int flags;
621 struct loop_info *loop_info = LOOP_INFO (loop);
622 struct loop_regs *regs = LOOP_REGS (loop);
623 int i;
624 rtx loop_start = loop->start;
625 rtx loop_end = loop->end;
626 rtx p;
627 /* 1 if we are scanning insns that could be executed zero times. */
628 int maybe_never = 0;
629 /* 1 if we are scanning insns that might never be executed
630 due to a subroutine call which might exit before they are reached. */
631 int call_passed = 0;
632 /* Jump insn that enters the loop, or 0 if control drops in. */
633 rtx loop_entry_jump = 0;
634 /* Number of insns in the loop. */
635 int insn_count;
636 int tem;
637 rtx temp, update_start, update_end;
638 /* The SET from an insn, if it is the only SET in the insn. */
639 rtx set, set1;
640 /* Chain describing insns movable in current loop. */
641 struct loop_movables *movables = LOOP_MOVABLES (loop);
642 /* Ratio of extra register life span we can justify
643 for saving an instruction. More if loop doesn't call subroutines
644 since in that case saving an insn makes more difference
645 and more registers are available. */
646 int threshold;
647 /* Nonzero if we are scanning instructions in a sub-loop. */
648 int loop_depth = 0;
650 loop->top = 0;
652 movables->head = 0;
653 movables->last = 0;
655 /* Determine whether this loop starts with a jump down to a test at
656 the end. This will occur for a small number of loops with a test
657 that is too complex to duplicate in front of the loop.
659 We search for the first insn or label in the loop, skipping NOTEs.
660 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
661 (because we might have a loop executed only once that contains a
662 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
663 (in case we have a degenerate loop).
665 Note that if we mistakenly think that a loop is entered at the top
666 when, in fact, it is entered at the exit test, the only effect will be
667 slightly poorer optimization. Making the opposite error can generate
668 incorrect code. Since very few loops now start with a jump to the
669 exit test, the code here to detect that case is very conservative. */
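/* Schematically, a loop entered at its exit test is laid out as

       NOTE_INSN_LOOP_BEG
       (jump to TEST)
     TOP:
       ... loop body ...
     TEST:
       (conditional jump back to TOP)
       NOTE_INSN_LOOP_END

   so the first real insn found below is a JUMP_INSN, LOOP->SCAN_START
   becomes the label TEST, and LOOP->TOP records TOP.  */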
671 for (p = NEXT_INSN (loop_start);
672 p != loop_end
673 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
674 && (GET_CODE (p) != NOTE
675 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
676 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
677 p = NEXT_INSN (p))
680 loop->scan_start = p;
682 /* If loop end is the end of the current function, then emit a
683 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
684 note insn. This is the position we use when sinking insns out of
685 the loop. */
686 if (NEXT_INSN (loop->end) != 0)
687 loop->sink = NEXT_INSN (loop->end);
688 else
689 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
691 /* Set up variables describing this loop. */
692 prescan_loop (loop);
693 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
695 /* If loop has a jump before the first label,
696 the true entry is the target of that jump.
697 Start scan from there.
698 But record in LOOP->TOP the place where the end-test jumps
699 back to so we can scan that after the end of the loop. */
700 if (GET_CODE (p) == JUMP_INSN)
702 loop_entry_jump = p;
704 /* Loop entry must be unconditional jump (and not a RETURN) */
705 if (any_uncondjump_p (p)
706 && JUMP_LABEL (p) != 0
707 /* Check to see whether the jump actually
708 jumps out of the loop (meaning it's no loop).
709 This case can happen for things like
710 do {..} while (0). If this label was generated previously
711 by loop, we can't tell anything about it and have to reject
712 the loop. */
713 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
715 loop->top = next_label (loop->scan_start);
716 loop->scan_start = JUMP_LABEL (p);
720 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
721 as required by loop_reg_used_before_p. So skip such loops. (This
722 test may never be true, but it's best to play it safe.)
724 Also, skip loops where we do not start scanning at a label. This
725 test also rejects loops starting with a JUMP_INSN that failed the
726 test above. */
728 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
729 || GET_CODE (loop->scan_start) != CODE_LABEL)
731 if (loop_dump_stream)
732 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
733 INSN_UID (loop_start), INSN_UID (loop_end));
734 return;
737 /* Allocate extra space for REGs that might be created by load_mems.
738 We allocate a little extra slop as well, in the hopes that we
739 won't have to reallocate the regs array. */
740 loop_regs_scan (loop, loop_info->mems_idx + 16);
741 insn_count = count_insns_in_loop (loop);
743 if (loop_dump_stream)
745 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
746 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
747 if (loop->cont)
748 fprintf (loop_dump_stream, "Continue at insn %d.\n",
749 INSN_UID (loop->cont));
752 /* Scan through the loop finding insns that are safe to move.
753 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
754 this reg will be considered invariant for subsequent insns.
755 We consider whether subsequent insns use the reg
756 in deciding whether it is worth actually moving.
758 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
759 and therefore it is possible that the insns we are scanning
760 would never be executed. At such times, we must make sure
761 that it is safe to execute the insn once instead of zero times.
762 When MAYBE_NEVER is 0, all insns will be executed at least once
763 so that is not a problem. */
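/* An example of why MAYBE_NEVER matters: in

       while (n--)
         if (flag)
           x = a / b;

   the division is invariant if a and b are, but hoisting it in front of
   the loop could introduce a division-by-zero trap on executions where
   the original loop never reached it.  */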
765 for (p = next_insn_in_loop (loop, loop->scan_start);
766 p != NULL_RTX;
767 p = next_insn_in_loop (loop, p))
769 if (GET_CODE (p) == INSN
770 && (set = single_set (p))
771 && GET_CODE (SET_DEST (set)) == REG
772 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
773 && SET_DEST (set) != pic_offset_table_rtx
774 #endif
775 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
777 int tem1 = 0;
778 int tem2 = 0;
779 int move_insn = 0;
780 rtx src = SET_SRC (set);
781 rtx dependencies = 0;
783 /* Figure out what to use as a source of this insn. If a REG_EQUIV
784 note is given or if a REG_EQUAL note with a constant operand is
785 specified, use it as the source and mark that we should move
786 this insn by calling emit_move_insn rather than duplicating the
787 insn.
789 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
790 is present. */
791 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
792 if (temp)
793 src = XEXP (temp, 0), move_insn = 1;
794 else
796 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
797 if (temp && CONSTANT_P (XEXP (temp, 0)))
798 src = XEXP (temp, 0), move_insn = 1;
799 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
801 src = XEXP (temp, 0);
802 /* A libcall block can use regs that don't appear in
803 the equivalent expression. To move the libcall,
804 we must move those regs too. */
805 dependencies = libcall_other_reg (p, src);
809 /* For parallels, add any possible uses to the dependencies, as we can't move
810 the insn without resolving them first. */
811 if (GET_CODE (PATTERN (p)) == PARALLEL)
813 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
815 rtx x = XVECEXP (PATTERN (p), 0, i);
816 if (GET_CODE (x) == USE)
817 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
821 /* Don't try to optimize a register that was made
822 by loop-optimization for an inner loop.
823 We don't know its life-span, so we can't compute the benefit. */
824 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
826 else if (/* The register is used in basic blocks other
827 than the one where it is set (meaning that
828 something after this point in the loop might
829 depend on its value before the set). */
830 ! reg_in_basic_block_p (p, SET_DEST (set))
831 /* And the set is not guaranteed to be executed once
832 the loop starts, or the value before the set is
833 needed before the set occurs...
835 ??? Note we have quadratic behaviour here, mitigated
836 by the fact that the previous test will often fail for
837 large loops. Rather than re-scanning the entire loop
838 each time for register usage, we should build tables
839 of the register usage and use them here instead. */
840 && (maybe_never
841 || loop_reg_used_before_p (loop, set, p)))
842 /* It is unsafe to move the set.
844 This code used to consider it OK to move a set of a variable
845 which was not created by the user and not used in an exit test.
846 That behavior is incorrect and was removed. */
848 else if ((tem = loop_invariant_p (loop, src))
849 && (dependencies == 0
850 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
851 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
852 || (tem1
853 = consec_sets_invariant_p
854 (loop, SET_DEST (set),
855 regs->array[REGNO (SET_DEST (set))].set_in_loop,
856 p)))
857 /* If the insn can cause a trap (such as divide by zero),
858 can't move it unless it's guaranteed to be executed
859 once loop is entered. Even a function call might
860 prevent the trap insn from being reached
861 (since it might exit!) */
862 && ! ((maybe_never || call_passed)
863 && may_trap_p (src)))
865 struct movable *m;
866 int regno = REGNO (SET_DEST (set));
868 /* A potential lossage is the case where two insns
869 can be combined as long as they are both in the loop, but
870 we move one of them outside the loop. For large loops,
871 this can lose. The most common case of this is the address
872 of a function being called.
874 Therefore, if this register is marked as being used exactly
875 once if we are in a loop with calls (a "large loop"), see if
876 we can replace the usage of this register with the source
877 of this SET. If we can, delete this insn.
879 Don't do this if P has a REG_RETVAL note or if we have
880 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
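/* Concretely: if a register is loaded with the address of a function,
   as in `r = &f' with a single use `call *r' later in the loop, moving
   the load out of the loop would keep combine from folding the address
   into the call. Replacing the use of r with &f and deleting the load
   preserves that combination.  */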
882 if (loop_info->has_call
883 && regs->array[regno].single_usage != 0
884 && regs->array[regno].single_usage != const0_rtx
885 && REGNO_FIRST_UID (regno) == INSN_UID (p)
886 && (REGNO_LAST_UID (regno)
887 == INSN_UID (regs->array[regno].single_usage))
888 && regs->array[regno].set_in_loop == 1
889 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
890 && ! side_effects_p (SET_SRC (set))
891 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
892 && (! SMALL_REGISTER_CLASSES
893 || (! (GET_CODE (SET_SRC (set)) == REG
894 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
895 /* This test is not redundant; SET_SRC (set) might be
896 a call-clobbered register and the life of REGNO
897 might span a call. */
898 && ! modified_between_p (SET_SRC (set), p,
899 regs->array[regno].single_usage)
900 && no_labels_between_p (p, regs->array[regno].single_usage)
901 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
902 regs->array[regno].single_usage))
904 /* Replace any usage in a REG_EQUAL note. Must copy the
905 new source, so that we don't get rtx sharing between the
906 SET_SOURCE and REG_NOTES of insn p. */
907 REG_NOTES (regs->array[regno].single_usage)
908 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
909 SET_DEST (set), copy_rtx (SET_SRC (set)));
911 delete_insn (p);
912 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
913 regs->array[regno+i].set_in_loop = 0;
914 continue;
917 m = (struct movable *) xmalloc (sizeof (struct movable));
918 m->next = 0;
919 m->insn = p;
920 m->set_src = src;
921 m->dependencies = dependencies;
922 m->set_dest = SET_DEST (set);
923 m->force = 0;
924 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
925 m->done = 0;
926 m->forces = 0;
927 m->partial = 0;
928 m->move_insn = move_insn;
929 m->move_insn_first = 0;
930 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
931 m->savemode = VOIDmode;
932 m->regno = regno;
933 /* Set M->cond if either loop_invariant_p
934 or consec_sets_invariant_p returned 2
935 (only conditionally invariant). */
936 m->cond = ((tem | tem1 | tem2) > 1);
937 m->global = LOOP_REG_GLOBAL_P (loop, regno);
938 m->match = 0;
939 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
940 m->savings = regs->array[regno].n_times_set;
941 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
942 m->savings += libcall_benefit (p);
943 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
944 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
945 /* Add M to the end of the chain MOVABLES. */
946 loop_movables_add (movables, m);
948 if (m->consec > 0)
950 /* It is possible for the first instruction to have a
951 REG_EQUAL note but a non-invariant SET_SRC, so we must
952 remember the status of the first instruction in case
953 the last instruction doesn't have a REG_EQUAL note. */
954 m->move_insn_first = m->move_insn;
956 /* Skip this insn, not checking REG_LIBCALL notes. */
957 p = next_nonnote_insn (p);
958 /* Skip the consecutive insns, if there are any. */
959 p = skip_consec_insns (p, m->consec);
960 /* Back up to the last insn of the consecutive group. */
961 p = prev_nonnote_insn (p);
963 /* We must now reset m->move_insn, m->is_equiv, and possibly
964 m->set_src to correspond to the effects of all the
965 insns. */
966 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
967 if (temp)
968 m->set_src = XEXP (temp, 0), m->move_insn = 1;
969 else
971 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
972 if (temp && CONSTANT_P (XEXP (temp, 0)))
973 m->set_src = XEXP (temp, 0), m->move_insn = 1;
974 else
975 m->move_insn = 0;
978 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
981 /* If this register is always set within a STRICT_LOW_PART
982 or set to zero, then its high bytes are constant.
983 So clear them outside the loop and within the loop
984 just load the low bytes.
985 We must check that the machine has an instruction to do so.
986 Also, if the value loaded into the register
987 depends on the same register, this cannot be done. */
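/* Schematically, the insn pair being matched here is

       (set (reg:SI r) (const_int 0))
       (set (strict_low_part (subreg:HI (reg:SI r) 0)) ...)

   i.e. a full-register clear followed by a load of just the low part,
   which together act as a zero-extension.  */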
988 else if (SET_SRC (set) == const0_rtx
989 && GET_CODE (NEXT_INSN (p)) == INSN
990 && (set1 = single_set (NEXT_INSN (p)))
991 && GET_CODE (set1) == SET
992 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
993 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
994 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
995 == SET_DEST (set))
996 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
998 int regno = REGNO (SET_DEST (set));
999 if (regs->array[regno].set_in_loop == 2)
1001 struct movable *m;
1002 m = (struct movable *) xmalloc (sizeof (struct movable));
1003 m->next = 0;
1004 m->insn = p;
1005 m->set_dest = SET_DEST (set);
1006 m->dependencies = 0;
1007 m->force = 0;
1008 m->consec = 0;
1009 m->done = 0;
1010 m->forces = 0;
1011 m->move_insn = 0;
1012 m->move_insn_first = 0;
1013 m->partial = 1;
1014 /* If the insn may not be executed on some cycles,
1015 we can't clear the whole reg; clear just high part.
1016 Not even if the reg is used only within this loop.
1017 Consider this:
1018 while (1)
1019 while (s != t) {
1020 if (foo ()) x = *s;
1021 use (x);
1023 Clearing x before the inner loop could clobber a value
1024 being saved from the last time around the outer loop.
1025 However, if the reg is not used outside this loop
1026 and all uses of the register are in the same
1027 basic block as the store, there is no problem.
1029 If this insn was made by loop, we don't know its
1030 INSN_LUID and hence must make a conservative
1031 assumption. */
1032 m->global = (INSN_UID (p) >= max_uid_for_loop
1033 || LOOP_REG_GLOBAL_P (loop, regno)
1034 || (labels_in_range_p
1035 (p, REGNO_FIRST_LUID (regno))));
1036 if (maybe_never && m->global)
1037 m->savemode = GET_MODE (SET_SRC (set1));
1038 else
1039 m->savemode = VOIDmode;
1040 m->regno = regno;
1041 m->cond = 0;
1042 m->match = 0;
1043 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1044 m->savings = 1;
1045 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1046 regs->array[regno+i].set_in_loop = -1;
1047 /* Add M to the end of the chain MOVABLES. */
1048 loop_movables_add (movables, m);
1052 /* Past a call insn, we get to insns which might not be executed
1053 because the call might exit. This matters for insns that trap.
1054 Constant and pure call insns always return, so they don't count. */
1055 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1056 call_passed = 1;
1057 /* Past a label or a jump, we get to insns for which we
1058 can't count on whether or how many times they will be
1059 executed during each iteration. Therefore, we can
1060 only move out sets of trivial variables
1061 (those not used after the loop). */
1062 /* Similar code appears twice in strength_reduce. */
1063 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1064 /* If we enter the loop in the middle, and scan around to the
1065 beginning, don't set maybe_never for that. This must be an
1066 unconditional jump, otherwise the code at the top of the
1067 loop might never be executed. Unconditional jumps are
1068 followed by a barrier then the loop_end. */
1069 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1070 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1071 && any_uncondjump_p (p)))
1072 maybe_never = 1;
1073 else if (GET_CODE (p) == NOTE)
1075 /* At the virtual top of a converted loop, insns are again known to
1076 be executed: logically, the loop begins here even though the exit
1077 code has been duplicated. */
1078 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1079 maybe_never = call_passed = 0;
1080 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1081 loop_depth++;
1082 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1083 loop_depth--;
1087 /* If one movable subsumes another, ignore that other. */
1089 ignore_some_movables (movables);
1091 /* For each movable insn, see if the reg that it loads
1092 leads, when it dies, right into another conditionally movable insn.
1093 If so, record that the second insn "forces" the first one,
1094 since the second can be moved only if the first is. */
1096 force_movables (movables);
1098 /* See if there are multiple movable insns that load the same value.
1099 If there are, make all but the first point at the first one
1100 through the `match' field, and add the priorities of them
1101 all together as the priority of the first. */
1103 combine_movables (movables, regs);
1105 /* Now consider each movable insn to decide whether it is worth moving.
1106 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1108 Generally this increases code size, so do not move movables when
1109 optimizing for code size. */
1111 if (! optimize_size)
1113 move_movables (loop, movables, threshold, insn_count);
1115 /* Recalculate regs->array if move_movables has created new
1116 registers. */
1117 if (max_reg_num () > regs->num)
1119 loop_regs_scan (loop, 0);
1120 for (update_start = loop_start;
1121 PREV_INSN (update_start)
1122 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1123 update_start = PREV_INSN (update_start))
1125 update_end = NEXT_INSN (loop_end);
1127 reg_scan_update (update_start, update_end, loop_max_reg);
1128 loop_max_reg = max_reg_num ();
1132 /* Now candidates that still are negative are those not moved.
1133 Change regs->array[I].set_in_loop to indicate that those are not actually
1134 invariant. */
1135 for (i = 0; i < regs->num; i++)
1136 if (regs->array[i].set_in_loop < 0)
1137 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1139 /* Now that we've moved some things out of the loop, we might be able to
1140 hoist even more memory references. */
1141 load_mems (loop);
1143 /* Recalculate regs->array if load_mems has created new registers. */
1144 if (max_reg_num () > regs->num)
1145 loop_regs_scan (loop, 0);
1147 for (update_start = loop_start;
1148 PREV_INSN (update_start)
1149 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1150 update_start = PREV_INSN (update_start))
1152 update_end = NEXT_INSN (loop_end);
1154 reg_scan_update (update_start, update_end, loop_max_reg);
1155 loop_max_reg = max_reg_num ();
1157 if (flag_strength_reduce)
1159 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1160 /* Ensure our label doesn't go away. */
1161 LABEL_NUSES (update_end)++;
1163 strength_reduce (loop, flags);
1165 reg_scan_update (update_start, update_end, loop_max_reg);
1166 loop_max_reg = max_reg_num ();
1168 if (update_end && GET_CODE (update_end) == CODE_LABEL
1169 && --LABEL_NUSES (update_end) == 0)
1170 delete_related_insns (update_end);
1174 /* The movable information is required for strength reduction. */
1175 loop_movables_free (movables);
1177 free (regs->array);
1178 regs->array = 0;
1179 regs->num = 0;
1182 /* Add elements to *OUTPUT to record all the pseudo-regs
1183 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1185 void
1186 record_excess_regs (in_this, not_in_this, output)
1187 rtx in_this, not_in_this;
1188 rtx *output;
1190 enum rtx_code code;
1191 const char *fmt;
1192 int i;
1194 code = GET_CODE (in_this);
1196 switch (code)
1198 case PC:
1199 case CC0:
1200 case CONST_INT:
1201 case CONST_DOUBLE:
1202 case CONST:
1203 case SYMBOL_REF:
1204 case LABEL_REF:
1205 return;
1207 case REG:
1208 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1209 && ! reg_mentioned_p (in_this, not_in_this))
1210 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1211 return;
1213 default:
1214 break;
1217 fmt = GET_RTX_FORMAT (code);
1218 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1220 int j;
1222 switch (fmt[i])
1224 case 'E':
1225 for (j = 0; j < XVECLEN (in_this, i); j++)
1226 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1227 break;
1229 case 'e':
1230 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1231 break;
1236 /* Check what regs are referred to in the libcall block ending with INSN,
1237 aside from those mentioned in the equivalent value.
1238 If there are none, return 0.
1239 If there are one or more, return an EXPR_LIST containing all of them. */
1241 rtx
1242 libcall_other_reg (insn, equiv)
1243 rtx insn, equiv;
1245 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1246 rtx p = XEXP (note, 0);
1247 rtx output = 0;
1249 /* First, find all the regs used in the libcall block
1250 that are not mentioned as inputs to the result. */
1252 while (p != insn)
1254 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1255 || GET_CODE (p) == CALL_INSN)
1256 record_excess_regs (PATTERN (p), equiv, &output);
1257 p = NEXT_INSN (p);
1260 return output;
1263 /* Return 1 if all uses of REG
1264 are between INSN and the end of the basic block. */
1266 static int
1267 reg_in_basic_block_p (insn, reg)
1268 rtx insn, reg;
1270 int regno = REGNO (reg);
1271 rtx p;
1273 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1274 return 0;
1276 /* Search this basic block for the already recorded last use of the reg. */
1277 for (p = insn; p; p = NEXT_INSN (p))
1279 switch (GET_CODE (p))
1281 case NOTE:
1282 break;
1284 case INSN:
1285 case CALL_INSN:
1286 /* Ordinary insn: if this is the last use, we win. */
1287 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1288 return 1;
1289 break;
1291 case JUMP_INSN:
1292 /* Jump insn: if this is the last use, we win. */
1293 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1294 return 1;
1295 /* Otherwise, it's the end of the basic block, so we lose. */
1296 return 0;
1298 case CODE_LABEL:
1299 case BARRIER:
1300 /* It's the end of the basic block, so we lose. */
1301 return 0;
1303 default:
1304 break;
1308 /* The "last use" that was recorded can't be found after the first
1309 use. This can happen when the last use was deleted while
1310 processing an inner loop, this inner loop was then completely
1311 unrolled, and the outer loop is always exited after the inner loop,
1312 so that everything after the first use becomes a single basic block. */
1313 return 1;
1316 /* Compute the benefit of eliminating the insns in the block whose
1317 last insn is LAST. This may be a group of insns used to compute a
1318 value directly or can contain a library call. */
1320 static int
1321 libcall_benefit (last)
1322 rtx last;
1324 rtx insn;
1325 int benefit = 0;
1327 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1328 insn != last; insn = NEXT_INSN (insn))
1330 if (GET_CODE (insn) == CALL_INSN)
1331 benefit += 10; /* Assume at least this many insns in a library
1332 routine. */
1333 else if (GET_CODE (insn) == INSN
1334 && GET_CODE (PATTERN (insn)) != USE
1335 && GET_CODE (PATTERN (insn)) != CLOBBER)
1336 benefit++;
1339 return benefit;
1342 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1344 static rtx
1345 skip_consec_insns (insn, count)
1346 rtx insn;
1347 int count;
1349 for (; count > 0; count--)
1351 rtx temp;
1353 /* If first insn of libcall sequence, skip to end. */
1354 /* Do this at start of loop, since INSN is guaranteed to
1355 be an insn here. */
1356 if (GET_CODE (insn) != NOTE
1357 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1358 insn = XEXP (temp, 0);
1361 insn = NEXT_INSN (insn);
1362 while (GET_CODE (insn) == NOTE);
1365 return insn;
1368 /* Ignore any movable whose insn falls within a libcall
1369 which is part of another movable.
1370 We make use of the fact that the movable for the libcall value
1371 was made later and so appears later on the chain. */
1373 static void
1374 ignore_some_movables (movables)
1375 struct loop_movables *movables;
1377 struct movable *m, *m1;
1379 for (m = movables->head; m; m = m->next)
1381 /* Is this a movable for the value of a libcall? */
1382 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1383 if (note)
1385 rtx insn;
1386 /* Check for earlier movables inside that range,
1387 and mark them invalid. We cannot use LUIDs here because
1388 insns created by loop.c for prior loops don't have LUIDs.
1389 Rather than reject all such insns from movables, we just
1390 explicitly check each insn in the libcall (since invariant
1391 libcalls aren't that common). */
1392 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1393 for (m1 = movables->head; m1 != m; m1 = m1->next)
1394 if (m1->insn == insn)
1395 m1->done = 1;
1400 /* For each movable insn, see if the reg that it loads
1401 leads, when it dies, right into another conditionally movable insn.
1402 If so, record that the second insn "forces" the first one,
1403 since the second can be moved only if the first is. */
1405 static void
1406 force_movables (movables)
1407 struct loop_movables *movables;
1409 struct movable *m, *m1;
1411 for (m1 = movables->head; m1; m1 = m1->next)
1412 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1413 if (!m1->partial && !m1->done)
1415 int regno = m1->regno;
1416 for (m = m1->next; m; m = m->next)
1417 /* ??? Could this be a bug? What if CSE caused the
1418 register of M1 to be used after this insn?
1419 Since CSE does not update regno_last_uid,
1420 this insn M->insn might not be where it dies.
1421 But very likely this doesn't matter; what matters is
1422 that M's reg is computed from M1's reg. */
1423 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1424 && !m->done)
1425 break;
1426 if (m != 0 && m->set_src == m1->set_dest
1427 /* If m->consec, m->set_src isn't valid. */
1428 && m->consec == 0)
1429 m = 0;
1431 /* Increase the priority of moving the first insn
1432 since it permits the second to be moved as well. */
1433 if (m != 0)
1435 m->forces = m1;
1436 m1->lifetime += m->lifetime;
1437 m1->savings += m->savings;
1442 /* Find invariant expressions that are equal and can be combined into
1443 one register. */
1445 static void
1446 combine_movables (movables, regs)
1447 struct loop_movables *movables;
1448 struct loop_regs *regs;
1450 struct movable *m;
1451 char *matched_regs = (char *) xmalloc (regs->num);
1452 enum machine_mode mode;
1454 /* Regs that are set more than once are not allowed to match
1455 or be matched. I'm no longer sure why not. */
1456 /* Only pseudo registers are allowed to match or be matched,
1457 since move_movables does not validate the change. */
1458 /* Perhaps testing m->consec_sets would be more appropriate here? */
1460 for (m = movables->head; m; m = m->next)
1461 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1462 && m->regno >= FIRST_PSEUDO_REGISTER
1463 && !m->partial)
1465 struct movable *m1;
1466 int regno = m->regno;
1468 memset (matched_regs, 0, regs->num);
1469 matched_regs[regno] = 1;
1471 /* We want later insns to match the first one. Don't make the first
1472 one match any later ones. So start this loop at m->next. */
1473 for (m1 = m->next; m1; m1 = m1->next)
1474 if (m != m1 && m1->match == 0
1475 && regs->array[m1->regno].n_times_set == 1
1476 && m1->regno >= FIRST_PSEUDO_REGISTER
1477 /* A reg used outside the loop mustn't be eliminated. */
1478 && !m1->global
1479 /* A reg used for zero-extending mustn't be eliminated. */
1480 && !m1->partial
1481 && (matched_regs[m1->regno]
1484 /* Can combine regs with different modes loaded from the
1485 same constant only if the modes are the same or
1486 if both are integer modes with M wider or the same
1487 width as M1. The check for integer is redundant, but
1488 safe, since the only case of differing destination
1489 modes with equal sources is when both sources are
1490 VOIDmode, i.e., CONST_INT. */
1491 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1492 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1493 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1494 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1495 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1496 /* See if the source of M1 says it matches M. */
1497 && ((GET_CODE (m1->set_src) == REG
1498 && matched_regs[REGNO (m1->set_src)])
1499 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1500 movables, regs))))
1501 && ((m->dependencies == m1->dependencies)
1502 || rtx_equal_p (m->dependencies, m1->dependencies)))
1504 m->lifetime += m1->lifetime;
1505 m->savings += m1->savings;
1506 m1->done = 1;
1507 m1->match = m;
1508 matched_regs[m1->regno] = 1;
1512 /* Now combine the regs used for zero-extension.
1513 This can be done for those not marked `global'
1514 provided their lives don't overlap. */
1516 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1517 mode = GET_MODE_WIDER_MODE (mode))
1519 struct movable *m0 = 0;
1521 /* Combine all the registers for extension from mode MODE.
1522 Don't combine any that are used outside this loop. */
1523 for (m = movables->head; m; m = m->next)
1524 if (m->partial && ! m->global
1525 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1527 struct movable *m1;
1529 int first = REGNO_FIRST_LUID (m->regno);
1530 int last = REGNO_LAST_LUID (m->regno);
1532 if (m0 == 0)
1534 /* First one: don't check for overlap, just record it. */
1535 m0 = m;
1536 continue;
1539 /* Make sure they extend to the same mode.
1540 (Almost always true.) */
1541 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1542 continue;
1544 /* We already have one: check for overlap with those
1545 already combined together. */
1546 for (m1 = movables->head; m1 != m; m1 = m1->next)
1547 if (m1 == m0 || (m1->partial && m1->match == m0))
1548 if (! (REGNO_FIRST_LUID (m1->regno) > last
1549 || REGNO_LAST_LUID (m1->regno) < first))
1550 goto overlap;
1552 /* No overlap: we can combine this with the others. */
1553 m0->lifetime += m->lifetime;
1554 m0->savings += m->savings;
1555 m->done = 1;
1556 m->match = m0;
1558 overlap:
1563 /* Clean up. */
1564 free (matched_regs);
1567 /* Returns the number of movable instructions in LOOP that were not
1568 moved outside the loop. */
1570 static int
1571 num_unmoved_movables (loop)
1572 const struct loop *loop;
1574 int num = 0;
1575 struct movable *m;
1577 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1578 if (!m->done)
1579 ++num;
1581 return num;
1585 /* Return 1 if regs X and Y will become the same if moved. */
1587 static int
1588 regs_match_p (x, y, movables)
1589 rtx x, y;
1590 struct loop_movables *movables;
1592 unsigned int xn = REGNO (x);
1593 unsigned int yn = REGNO (y);
1594 struct movable *mx, *my;
1596 for (mx = movables->head; mx; mx = mx->next)
1597 if (mx->regno == xn)
1598 break;
1600 for (my = movables->head; my; my = my->next)
1601 if (my->regno == yn)
1602 break;
1604 return (mx && my
1605 && ((mx->match == my->match && mx->match != 0)
1606 || mx->match == my
1607 || mx == my->match));
1610 /* Return 1 if X and Y are identical-looking rtx's.
1611 This is the Lisp function EQUAL for rtx arguments.
1613 If two registers are matching movables or a movable register and an
1614 equivalent constant, consider them equal. */
1616 static int
1617 rtx_equal_for_loop_p (x, y, movables, regs)
1618 rtx x, y;
1619 struct loop_movables *movables;
1620 struct loop_regs *regs;
1622 int i;
1623 int j;
1624 struct movable *m;
1625 enum rtx_code code;
1626 const char *fmt;
1628 if (x == y)
1629 return 1;
1630 if (x == 0 || y == 0)
1631 return 0;
1633 code = GET_CODE (x);
1635 /* If we have a register and a constant, they may sometimes be
1636 equal. */
1637 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1638 && CONSTANT_P (y))
1640 for (m = movables->head; m; m = m->next)
1641 if (m->move_insn && m->regno == REGNO (x)
1642 && rtx_equal_p (m->set_src, y))
1643 return 1;
1645 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1646 && CONSTANT_P (x))
1648 for (m = movables->head; m; m = m->next)
1649 if (m->move_insn && m->regno == REGNO (y)
1650 && rtx_equal_p (m->set_src, x))
1651 return 1;
1654 /* Otherwise, rtx's of different codes cannot be equal. */
1655 if (code != GET_CODE (y))
1656 return 0;
1658 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1659 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1661 if (GET_MODE (x) != GET_MODE (y))
1662 return 0;
1664 /* These three types of rtx's can be compared nonrecursively. */
1665 if (code == REG)
1666 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1668 if (code == LABEL_REF)
1669 return XEXP (x, 0) == XEXP (y, 0);
1670 if (code == SYMBOL_REF)
1671 return XSTR (x, 0) == XSTR (y, 0);
1673 /* Compare the elements. If any pair of corresponding elements
1674 fails to match, return 0 for the whole thing. */
1676 fmt = GET_RTX_FORMAT (code);
1677 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1679 switch (fmt[i])
1681 case 'w':
1682 if (XWINT (x, i) != XWINT (y, i))
1683 return 0;
1684 break;
1686 case 'i':
1687 if (XINT (x, i) != XINT (y, i))
1688 return 0;
1689 break;
1691 case 'E':
1692 /* Two vectors must have the same length. */
1693 if (XVECLEN (x, i) != XVECLEN (y, i))
1694 return 0;
1696 /* And the corresponding elements must match. */
1697 for (j = 0; j < XVECLEN (x, i); j++)
1698 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1699 movables, regs) == 0)
1700 return 0;
1701 break;
1703 case 'e':
1704 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1705 == 0)
1706 return 0;
1707 break;
1709 case 's':
1710 if (strcmp (XSTR (x, i), XSTR (y, i)))
1711 return 0;
1712 break;
1714 case 'u':
1715 /* These are just backpointers, so they don't matter. */
1716 break;
1718 case '0':
1719 break;
1721 /* It is believed that rtx's at this level will never
1722 contain anything but integers and other rtx's,
1723 except for within LABEL_REFs and SYMBOL_REFs. */
1724 default:
1725 abort ();
1728 return 1;
1731 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1732 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1733 references is incremented once for each added note. */
1735 static void
1736 add_label_notes (x, insns)
1737 rtx x;
1738 rtx insns;
1740 enum rtx_code code = GET_CODE (x);
1741 int i, j;
1742 const char *fmt;
1743 rtx insn;
1745 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1747 /* This code used to ignore labels that referred to dispatch tables to
1748 avoid flow generating (slightly) worse code.
1750 We no longer ignore such label references (see LABEL_REF handling in
1751 mark_jump_label for additional information). */
1752 for (insn = insns; insn; insn = NEXT_INSN (insn))
1753 if (reg_mentioned_p (XEXP (x, 0), insn))
1755 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1756 REG_NOTES (insn));
1757 if (LABEL_P (XEXP (x, 0)))
1758 LABEL_NUSES (XEXP (x, 0))++;
1762 fmt = GET_RTX_FORMAT (code);
1763 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1765 if (fmt[i] == 'e')
1766 add_label_notes (XEXP (x, i), insns);
1767 else if (fmt[i] == 'E')
1768 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1769 add_label_notes (XVECEXP (x, i, j), insns);
1773 /* Scan MOVABLES, and move the insns that deserve to be moved.
1774 If two matching movables are combined, replace one reg with the
1775 other throughout. */
1777 static void
1778 move_movables (loop, movables, threshold, insn_count)
1779 struct loop *loop;
1780 struct loop_movables *movables;
1781 int threshold;
1782 int insn_count;
1784 struct loop_regs *regs = LOOP_REGS (loop);
1785 int nregs = regs->num;
1786 rtx new_start = 0;
1787 struct movable *m;
1788 rtx p;
1789 rtx loop_start = loop->start;
1790 rtx loop_end = loop->end;
1791 /* Map of pseudo-register replacements to handle combining
1792 when we move several insns that load the same value
1793 into different pseudo-registers. */
1794 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1795 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1797 for (m = movables->head; m; m = m->next)
1799 /* Describe this movable insn. */
1801 if (loop_dump_stream)
1803 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1804 INSN_UID (m->insn), m->regno, m->lifetime);
1805 if (m->consec > 0)
1806 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1807 if (m->cond)
1808 fprintf (loop_dump_stream, "cond ");
1809 if (m->force)
1810 fprintf (loop_dump_stream, "force ");
1811 if (m->global)
1812 fprintf (loop_dump_stream, "global ");
1813 if (m->done)
1814 fprintf (loop_dump_stream, "done ");
1815 if (m->move_insn)
1816 fprintf (loop_dump_stream, "move-insn ");
1817 if (m->match)
1818 fprintf (loop_dump_stream, "matches %d ",
1819 INSN_UID (m->match->insn));
1820 if (m->forces)
1821 fprintf (loop_dump_stream, "forces %d ",
1822 INSN_UID (m->forces->insn));
1825 /* Ignore the insn if it's already done (it matched something else).
1826 Otherwise, see if it is now safe to move. */
1828 if (!m->done
1829 && (! m->cond
1830 || (1 == loop_invariant_p (loop, m->set_src)
1831 && (m->dependencies == 0
1832 || 1 == loop_invariant_p (loop, m->dependencies))
1833 && (m->consec == 0
1834 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1835 m->consec + 1,
1836 m->insn))))
1837 && (! m->forces || m->forces->done))
1839 int regno;
1840 rtx p;
1841 int savings = m->savings;
1843 /* We have an insn that is safe to move.
1844 Compute its desirability. */
1846 p = m->insn;
1847 regno = m->regno;
1849 if (loop_dump_stream)
1850 fprintf (loop_dump_stream, "savings %d ", savings);
1852 if (regs->array[regno].moved_once && loop_dump_stream)
1853 fprintf (loop_dump_stream, "halved since already moved ");
1855 /* An insn MUST be moved if we already moved something else
1856 which is safe only if this one is moved too: that is,
1857 if already_moved[REGNO] is nonzero. */
1859 /* An insn is desirable to move if the new lifetime of the
1860 register is no more than THRESHOLD times the old lifetime.
1861 If it's not desirable, it means the loop is so big
1862 that moving won't speed things up much,
1863 and it is liable to make register usage worse. */
1865 /* It is also desirable to move if it can be moved at no
1866 extra cost because something else was already moved. */
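/* Worked example of the test below, with invented numbers: for
   THRESHOLD 6, SAVINGS 2 and a register LIFETIME of 10 insns in a
   loop of INSN_COUNT 100, we compare 6 * 2 * 10 = 120 against 100
   and move the insn; had the register already been moved out of
   another loop, the bar would double to 200 and the move would be
   rejected. */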
1868 if (already_moved[regno]
1869 || flag_move_all_movables
1870 || (threshold * savings * m->lifetime) >=
1871 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1872 || (m->forces && m->forces->done
1873 && regs->array[m->forces->regno].n_times_set == 1))
1875 int count;
1876 struct movable *m1;
1877 rtx first = NULL_RTX;
1879 /* Now move the insns that set the reg. */
1881 if (m->partial && m->match)
1883 rtx newpat, i1;
1884 rtx r1, r2;
1885 /* Find the end of this chain of matching regs.
1886 Thus, we load each reg in the chain from that one reg.
1887 And that reg is loaded with 0 directly,
1888 since it has ->match == 0. */
1889 for (m1 = m; m1->match; m1 = m1->match);
1890 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1891 SET_DEST (PATTERN (m1->insn)));
1892 i1 = loop_insn_hoist (loop, newpat);
1894 /* Mark the moved, invariant reg as being allowed to
1895 share a hard reg with the other matching invariant. */
1896 REG_NOTES (i1) = REG_NOTES (m->insn);
1897 r1 = SET_DEST (PATTERN (m->insn));
1898 r2 = SET_DEST (PATTERN (m1->insn));
1899 regs_may_share
1900 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1901 gen_rtx_EXPR_LIST (VOIDmode, r2,
1902 regs_may_share));
1903 delete_insn (m->insn);
1905 if (new_start == 0)
1906 new_start = i1;
1908 if (loop_dump_stream)
1909 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1911 /* If we are to re-generate the item being moved with a
1912 new move insn, first delete what we have and then emit
1913 the move insn before the loop. */
1914 else if (m->move_insn)
1916 rtx i1, temp, seq;
1918 for (count = m->consec; count >= 0; count--)
1920 /* If this is the first insn of a library call sequence,
1921 skip to the end. */
1922 if (GET_CODE (p) != NOTE
1923 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1924 p = XEXP (temp, 0);
1926 /* If this is the last insn of a libcall sequence, then
1927 delete every insn in the sequence except the last.
1928 The last insn is handled in the normal manner. */
1929 if (GET_CODE (p) != NOTE
1930 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1932 temp = XEXP (temp, 0);
1933 while (temp != p)
1934 temp = delete_insn (temp);
1937 temp = p;
1938 p = delete_insn (p);
1940 /* simplify_giv_expr expects that it can walk the insns
1941 at m->insn forwards and see this old sequence we are
1942 tossing here. delete_insn does preserve the next
1943 pointers, but when we skip over a NOTE we must fix
1944 it up. Otherwise that code walks into the non-deleted
1945 insn stream. */
1946 while (p && GET_CODE (p) == NOTE)
1947 p = NEXT_INSN (temp) = NEXT_INSN (p);
1950 start_sequence ();
1951 emit_move_insn (m->set_dest, m->set_src);
1952 temp = get_insns ();
1953 seq = gen_sequence ();
1954 end_sequence ();
1956 add_label_notes (m->set_src, temp);
1958 i1 = loop_insn_hoist (loop, seq);
1959 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1960 set_unique_reg_note (i1,
1961 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1962 m->set_src);
1964 if (loop_dump_stream)
1965 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1967 /* The more regs we move, the less we like moving them. */
1968 threshold -= 3;
1970 else
1972 for (count = m->consec; count >= 0; count--)
1974 rtx i1, temp;
1976 /* If first insn of libcall sequence, skip to end. */
1977 /* Do this at start of loop, since p is guaranteed to
1978 be an insn here. */
1979 if (GET_CODE (p) != NOTE
1980 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1981 p = XEXP (temp, 0);
1983 /* If last insn of libcall sequence, move all
1984 insns except the last before the loop. The last
1985 insn is handled in the normal manner. */
1986 if (GET_CODE (p) != NOTE
1987 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1989 rtx fn_address = 0;
1990 rtx fn_reg = 0;
1991 rtx fn_address_insn = 0;
1993 first = 0;
1994 for (temp = XEXP (temp, 0); temp != p;
1995 temp = NEXT_INSN (temp))
1997 rtx body;
1998 rtx n;
1999 rtx next;
2001 if (GET_CODE (temp) == NOTE)
2002 continue;
2004 body = PATTERN (temp);
2006 /* Find the next insn after TEMP,
2007 not counting USE or NOTE insns. */
2008 for (next = NEXT_INSN (temp); next != p;
2009 next = NEXT_INSN (next))
2010 if (! (GET_CODE (next) == INSN
2011 && GET_CODE (PATTERN (next)) == USE)
2012 && GET_CODE (next) != NOTE)
2013 break;
2015 /* If that is the call, this may be the insn
2016 that loads the function address.
2018 Extract the function address from the insn
2019 that loads it into a register.
2020 If this insn was cse'd, we get incorrect code.
2022 So emit a new move insn that copies the
2023 function address into the register that the
2024 call insn will use. flow.c will delete any
2025 redundant stores that we have created. */
2026 if (GET_CODE (next) == CALL_INSN
2027 && GET_CODE (body) == SET
2028 && GET_CODE (SET_DEST (body)) == REG
2029 && (n = find_reg_note (temp, REG_EQUAL,
2030 NULL_RTX)))
2032 fn_reg = SET_SRC (body);
2033 if (GET_CODE (fn_reg) != REG)
2034 fn_reg = SET_DEST (body);
2035 fn_address = XEXP (n, 0);
2036 fn_address_insn = temp;
2038 /* We have the call insn.
2039 If it uses the register we suspect it might,
2040 load it with the correct address directly. */
2041 if (GET_CODE (temp) == CALL_INSN
2042 && fn_address != 0
2043 && reg_referenced_p (fn_reg, body))
2044 loop_insn_emit_after (loop, 0, fn_address_insn,
2045 gen_move_insn
2046 (fn_reg, fn_address));
2048 if (GET_CODE (temp) == CALL_INSN)
2050 i1 = loop_call_insn_hoist (loop, body);
2051 /* Because the USAGE information potentially
2052 contains objects other than hard registers
2053 we need to copy it. */
2054 if (CALL_INSN_FUNCTION_USAGE (temp))
2055 CALL_INSN_FUNCTION_USAGE (i1)
2056 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2058 else
2059 i1 = loop_insn_hoist (loop, body);
2060 if (first == 0)
2061 first = i1;
2062 if (temp == fn_address_insn)
2063 fn_address_insn = i1;
2064 REG_NOTES (i1) = REG_NOTES (temp);
2065 REG_NOTES (temp) = NULL;
2066 delete_insn (temp);
2068 if (new_start == 0)
2069 new_start = first;
2071 if (m->savemode != VOIDmode)
2073 /* P sets REG to zero; but we should clear only
2074 the bits that are not covered by the mode
2075 m->savemode. */
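/* For example (modes illustrative): if REG is SImode and m->savemode
   is QImode, GET_MODE_BITSIZE (m->savemode) is typically 8, the mask
   below is (1 << 8) - 1 = 0xff, and the hoisted insn amounts to
   reg &= 0xff, clearing everything above the low byte once, before
   the loop. */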
2076 rtx reg = m->set_dest;
2077 rtx sequence;
2078 rtx tem;
2080 start_sequence ();
2081 tem = expand_simple_binop
2082 (GET_MODE (reg), AND, reg,
2083 GEN_INT ((((HOST_WIDE_INT) 1
2084 << GET_MODE_BITSIZE (m->savemode)))
2085 - 1),
2086 reg, 1, OPTAB_LIB_WIDEN);
2087 if (tem == 0)
2088 abort ();
2089 if (tem != reg)
2090 emit_move_insn (reg, tem);
2091 sequence = gen_sequence ();
2092 end_sequence ();
2093 i1 = loop_insn_hoist (loop, sequence);
2095 else if (GET_CODE (p) == CALL_INSN)
2097 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2098 /* Because the USAGE information potentially
2099 contains objects other than hard registers
2100 we need to copy it. */
2101 if (CALL_INSN_FUNCTION_USAGE (p))
2102 CALL_INSN_FUNCTION_USAGE (i1)
2103 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2105 else if (count == m->consec && m->move_insn_first)
2107 rtx seq;
2108 /* The SET_SRC might not be invariant, so we must
2109 use the REG_EQUAL note. */
2110 start_sequence ();
2111 emit_move_insn (m->set_dest, m->set_src);
2112 temp = get_insns ();
2113 seq = gen_sequence ();
2114 end_sequence ();
2116 add_label_notes (m->set_src, temp);
2118 i1 = loop_insn_hoist (loop, seq);
2119 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2120 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2121 : REG_EQUAL, m->set_src);
2123 else
2124 i1 = loop_insn_hoist (loop, PATTERN (p));
2126 if (REG_NOTES (i1) == 0)
2128 REG_NOTES (i1) = REG_NOTES (p);
2129 REG_NOTES (p) = NULL;
2131 /* If there is a REG_EQUAL note present whose value
2132 is not loop invariant, then delete it, since it
2133 may cause problems with later optimization passes.
2134 It is possible for cse to create such notes
2135 as a result of record_jump_cond. */
2137 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2138 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2139 remove_note (i1, temp);
2142 if (new_start == 0)
2143 new_start = i1;
2145 if (loop_dump_stream)
2146 fprintf (loop_dump_stream, " moved to %d",
2147 INSN_UID (i1));
2149 /* If library call, now fix the REG_NOTES that contain
2150 insn pointers, namely REG_LIBCALL on FIRST
2151 and REG_RETVAL on I1. */
2152 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2154 XEXP (temp, 0) = first;
2155 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2156 XEXP (temp, 0) = i1;
2159 temp = p;
2160 delete_insn (p);
2161 p = NEXT_INSN (p);
2163 /* simplify_giv_expr expects that it can walk the insns
2164 at m->insn forwards and see this old sequence we are
2165 tossing here. delete_insn does preserve the next
2166 pointers, but when we skip over a NOTE we must fix
2167 it up. Otherwise that code walks into the non-deleted
2168 insn stream. */
2169 while (p && GET_CODE (p) == NOTE)
2170 p = NEXT_INSN (temp) = NEXT_INSN (p);
2173 /* The more regs we move, the less we like moving them. */
2174 threshold -= 3;
2177 /* Any other movable that loads the same register
2178 MUST be moved. */
2179 already_moved[regno] = 1;
2181 /* This reg has been moved out of one loop. */
2182 regs->array[regno].moved_once = 1;
2184 /* The reg set here is now invariant. */
2185 if (! m->partial)
2187 int i;
2188 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2189 regs->array[regno+i].set_in_loop = 0;
2192 m->done = 1;
2194 /* Change the length-of-life info for the register
2195 to say it lives at least the full length of this loop.
2196 This will help guide optimizations in outer loops. */
2198 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2199 /* This is the old insn before all the moved insns.
2200 We can't use the moved insn because it is out of range
2201 in uid_luid. Only the old insns have luids. */
2202 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2203 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2204 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2206 /* Combine with this moved insn any other matching movables. */
2208 if (! m->partial)
2209 for (m1 = movables->head; m1; m1 = m1->next)
2210 if (m1->match == m)
2212 rtx temp;
2214 /* Schedule the reg loaded by M1
2215 for replacement so that it shares the reg of M.
2216 If the modes differ (only possible in restricted
2217 circumstances), make a SUBREG.
2219 Note this assumes that the target dependent files
2220 treat REG and SUBREG equally, including within
2221 GO_IF_LEGITIMATE_ADDRESS and in all the
2222 predicates since we never verify that replacing the
2223 original register with a SUBREG results in a
2224 recognizable insn. */
2225 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2226 reg_map[m1->regno] = m->set_dest;
2227 else
2228 reg_map[m1->regno]
2229 = gen_lowpart_common (GET_MODE (m1->set_dest),
2230 m->set_dest);
2232 /* Get rid of the matching insn
2233 and prevent further processing of it. */
2234 m1->done = 1;
2236 /* If library call, delete all insns. */
2237 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2238 NULL_RTX)))
2239 delete_insn_chain (XEXP (temp, 0), m1->insn);
2240 else
2241 delete_insn (m1->insn);
2243 /* Any other movable that loads the same register
2244 MUST be moved. */
2245 already_moved[m1->regno] = 1;
2247 /* The reg merged here is now invariant,
2248 if the reg it matches is invariant. */
2249 if (! m->partial)
2251 int i;
2252 for (i = 0;
2253 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2254 i++)
2255 regs->array[m1->regno+i].set_in_loop = 0;
2259 else if (loop_dump_stream)
2260 fprintf (loop_dump_stream, "not desirable");
2262 else if (loop_dump_stream && !m->match)
2263 fprintf (loop_dump_stream, "not safe");
2265 if (loop_dump_stream)
2266 fprintf (loop_dump_stream, "\n");
2269 if (new_start == 0)
2270 new_start = loop_start;
2272 /* Go through all the instructions in the loop, making
2273 all the register substitutions scheduled in REG_MAP. */
2274 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2275 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2276 || GET_CODE (p) == CALL_INSN)
2278 replace_regs (PATTERN (p), reg_map, nregs, 0);
2279 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2280 INSN_CODE (p) = -1;
2283 /* Clean up. */
2284 free (reg_map);
2285 free (already_moved);
2289 static void
2290 loop_movables_add (movables, m)
2291 struct loop_movables *movables;
2292 struct movable *m;
2294 if (movables->head == 0)
2295 movables->head = m;
2296 else
2297 movables->last->next = m;
2298 movables->last = m;
2302 static void
2303 loop_movables_free (movables)
2304 struct loop_movables *movables;
2306 struct movable *m;
2307 struct movable *m_next;
2309 for (m = movables->head; m; m = m_next)
2311 m_next = m->next;
2312 free (m);
2316 #if 0
2317 /* Scan X and replace the address of any MEM in it with ADDR.
2318 REG is the address that MEM should have before the replacement. */
2320 static void
2321 replace_call_address (x, reg, addr)
2322 rtx x, reg, addr;
2324 enum rtx_code code;
2325 int i;
2326 const char *fmt;
2328 if (x == 0)
2329 return;
2330 code = GET_CODE (x);
2331 switch (code)
2333 case PC:
2334 case CC0:
2335 case CONST_INT:
2336 case CONST_DOUBLE:
2337 case CONST:
2338 case SYMBOL_REF:
2339 case LABEL_REF:
2340 case REG:
2341 return;
2343 case SET:
2344 /* Short cut for very common case. */
2345 replace_call_address (XEXP (x, 1), reg, addr);
2346 return;
2348 case CALL:
2349 /* Short cut for very common case. */
2350 replace_call_address (XEXP (x, 0), reg, addr);
2351 return;
2353 case MEM:
2354 /* If this MEM uses a reg other than the one we expected,
2355 something is wrong. */
2356 if (XEXP (x, 0) != reg)
2357 abort ();
2358 XEXP (x, 0) = addr;
2359 return;
2361 default:
2362 break;
2365 fmt = GET_RTX_FORMAT (code);
2366 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2368 if (fmt[i] == 'e')
2369 replace_call_address (XEXP (x, i), reg, addr);
2370 else if (fmt[i] == 'E')
2372 int j;
2373 for (j = 0; j < XVECLEN (x, i); j++)
2374 replace_call_address (XVECEXP (x, i, j), reg, addr);
2378 #endif
2380 /* Return the number of memory refs to addresses that vary
2381 in the rtx X. */
2383 static int
2384 count_nonfixed_reads (loop, x)
2385 const struct loop *loop;
2386 rtx x;
2388 enum rtx_code code;
2389 int i;
2390 const char *fmt;
2391 int value;
2393 if (x == 0)
2394 return 0;
2396 code = GET_CODE (x);
2397 switch (code)
2399 case PC:
2400 case CC0:
2401 case CONST_INT:
2402 case CONST_DOUBLE:
2403 case CONST:
2404 case SYMBOL_REF:
2405 case LABEL_REF:
2406 case REG:
2407 return 0;
2409 case MEM:
2410 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2411 + count_nonfixed_reads (loop, XEXP (x, 0)));
2413 default:
2414 break;
2417 value = 0;
2418 fmt = GET_RTX_FORMAT (code);
2419 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2421 if (fmt[i] == 'e')
2422 value += count_nonfixed_reads (loop, XEXP (x, i));
2423 if (fmt[i] == 'E')
2425 int j;
2426 for (j = 0; j < XVECLEN (x, i); j++)
2427 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2430 return value;
2433 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2434 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2435 `unknown_address_altered', `unknown_constant_address_altered', and
2436 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2437 list `store_mems' in LOOP. */
2439 static void
2440 prescan_loop (loop)
2441 struct loop *loop;
2443 int level = 1;
2444 rtx insn;
2445 struct loop_info *loop_info = LOOP_INFO (loop);
2446 rtx start = loop->start;
2447 rtx end = loop->end;
2448 /* The label after END. Jumping here is just like falling off the
2449 end of the loop. We use next_nonnote_insn instead of next_label
2450 as a hedge against the (pathological) case where some actual insn
2451 might end up between the two. */
2452 rtx exit_target = next_nonnote_insn (end);
2454 loop_info->has_indirect_jump = indirect_jump_in_function;
2455 loop_info->pre_header_has_call = 0;
2456 loop_info->has_call = 0;
2457 loop_info->has_nonconst_call = 0;
2458 loop_info->has_prefetch = 0;
2459 loop_info->has_volatile = 0;
2460 loop_info->has_tablejump = 0;
2461 loop_info->has_multiple_exit_targets = 0;
2462 loop->level = 1;
2464 loop_info->unknown_address_altered = 0;
2465 loop_info->unknown_constant_address_altered = 0;
2466 loop_info->store_mems = NULL_RTX;
2467 loop_info->first_loop_store_insn = NULL_RTX;
2468 loop_info->mems_idx = 0;
2469 loop_info->num_mem_sets = 0;
2472 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2473 insn = PREV_INSN (insn))
2475 if (GET_CODE (insn) == CALL_INSN)
2477 loop_info->pre_header_has_call = 1;
2478 break;
2482 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2483 insn = NEXT_INSN (insn))
2485 switch (GET_CODE (insn))
2487 case NOTE:
2488 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2490 ++level;
2491 /* Count number of loops contained in this one. */
2492 loop->level++;
2494 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2495 --level;
2496 break;
2498 case CALL_INSN:
2499 if (! CONST_OR_PURE_CALL_P (insn))
2501 loop_info->unknown_address_altered = 1;
2502 loop_info->has_nonconst_call = 1;
2504 else if (pure_call_p (insn))
2505 loop_info->has_nonconst_call = 1;
2506 loop_info->has_call = 1;
2507 if (can_throw_internal (insn))
2508 loop_info->has_multiple_exit_targets = 1;
2509 break;
2511 case JUMP_INSN:
2512 if (! loop_info->has_multiple_exit_targets)
2514 rtx set = pc_set (insn);
2516 if (set)
2518 rtx src = SET_SRC (set);
2519 rtx label1, label2;
2521 if (GET_CODE (src) == IF_THEN_ELSE)
2523 label1 = XEXP (src, 1);
2524 label2 = XEXP (src, 2);
2526 else
2528 label1 = src;
2529 label2 = NULL_RTX;
2532 do
2534 if (label1 && label1 != pc_rtx)
2536 if (GET_CODE (label1) != LABEL_REF)
2538 /* Something tricky. */
2539 loop_info->has_multiple_exit_targets = 1;
2540 break;
2542 else if (XEXP (label1, 0) != exit_target
2543 && LABEL_OUTSIDE_LOOP_P (label1))
2545 /* A jump outside the current loop. */
2546 loop_info->has_multiple_exit_targets = 1;
2547 break;
2551 label1 = label2;
2552 label2 = NULL_RTX;
2554 while (label1);
2556 else
2558 /* A return, or something tricky. */
2559 loop_info->has_multiple_exit_targets = 1;
2562 /* FALLTHRU */
2564 case INSN:
2565 if (volatile_refs_p (PATTERN (insn)))
2566 loop_info->has_volatile = 1;
2568 if (GET_CODE (insn) == JUMP_INSN
2569 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2570 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2571 loop_info->has_tablejump = 1;
2573 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2574 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2575 loop_info->first_loop_store_insn = insn;
2577 if (flag_non_call_exceptions && can_throw_internal (insn))
2578 loop_info->has_multiple_exit_targets = 1;
2579 break;
2581 default:
2582 break;
2586 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2587 if (/* An exception thrown by a called function might land us
2588 anywhere. */
2589 ! loop_info->has_nonconst_call
2590 /* We don't want loads for MEMs moved to a location before the
2591 one at which their stack memory becomes allocated. (Note
2592 that this is not a problem for malloc, etc., since those
2593 require actual function calls.) */
2594 && ! current_function_calls_alloca
2595 /* There are ways to leave the loop other than falling off the
2596 end. */
2597 && ! loop_info->has_multiple_exit_targets)
2598 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2599 insn = NEXT_INSN (insn))
2600 for_each_rtx (&insn, insert_loop_mem, loop_info);
2602 /* BLKmode MEMs are added to loop_info->store_mems as necessary so
2603 that loop_invariant_p and load_mems can use true_dependence
2604 to determine what is really clobbered. */
2605 if (loop_info->unknown_address_altered)
2607 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2609 loop_info->store_mems
2610 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2612 if (loop_info->unknown_constant_address_altered)
2614 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2616 RTX_UNCHANGING_P (mem) = 1;
2617 loop_info->store_mems
2618 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2622 /* Invalidate all loops containing LABEL. */
2624 static void
2625 invalidate_loops_containing_label (label)
2626 rtx label;
2628 struct loop *loop;
2629 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2630 loop->invalid = 1;
2633 /* Scan the function looking for loops. Record the start and end of each loop.
2634 Also mark as invalid loops any loops that contain a setjmp or are branched
2635 to from outside the loop. */
2637 static void
2638 find_and_verify_loops (f, loops)
2639 rtx f;
2640 struct loops *loops;
2642 rtx insn;
2643 rtx label;
2644 int num_loops;
2645 struct loop *current_loop;
2646 struct loop *next_loop;
2647 struct loop *loop;
2649 num_loops = loops->num;
2651 compute_luids (f, NULL_RTX, 0);
2653 /* If there are jumps to undefined labels,
2654 treat them as jumps out of any/all loops.
2655 This also avoids writing past end of tables when there are no loops. */
2656 uid_loop[0] = NULL;
2658 /* Find boundaries of loops, mark which loops are contained within
2659 loops, and invalidate loops that have setjmp. */
2661 num_loops = 0;
2662 current_loop = NULL;
2663 for (insn = f; insn; insn = NEXT_INSN (insn))
2665 if (GET_CODE (insn) == NOTE)
2666 switch (NOTE_LINE_NUMBER (insn))
2668 case NOTE_INSN_LOOP_BEG:
2669 next_loop = loops->array + num_loops;
2670 next_loop->num = num_loops;
2671 num_loops++;
2672 next_loop->start = insn;
2673 next_loop->outer = current_loop;
2674 current_loop = next_loop;
2675 break;
2677 case NOTE_INSN_LOOP_CONT:
2678 current_loop->cont = insn;
2679 break;
2681 case NOTE_INSN_LOOP_VTOP:
2682 current_loop->vtop = insn;
2683 break;
2685 case NOTE_INSN_LOOP_END:
2686 if (! current_loop)
2687 abort ();
2689 current_loop->end = insn;
2690 current_loop = current_loop->outer;
2691 break;
2693 default:
2694 break;
2697 if (GET_CODE (insn) == CALL_INSN
2698 && find_reg_note (insn, REG_SETJMP, NULL))
2700 /* In this case, we must invalidate our current loop and any
2701 enclosing loop. */
2702 for (loop = current_loop; loop; loop = loop->outer)
2704 loop->invalid = 1;
2705 if (loop_dump_stream)
2706 fprintf (loop_dump_stream,
2707 "\nLoop at %d ignored due to setjmp.\n",
2708 INSN_UID (loop->start));
2712 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2713 enclosing loop, but this doesn't matter. */
2714 uid_loop[INSN_UID (insn)] = current_loop;
2717 /* Any loop containing a label used in an initializer must be invalidated,
2718 because it can be jumped into from anywhere. */
2719 for (label = forced_labels; label; label = XEXP (label, 1))
2720 invalidate_loops_containing_label (XEXP (label, 0));
2722 /* Any loop containing a label used for an exception handler must be
2723 invalidated, because it can be jumped into from anywhere. */
2724 for_each_eh_label (invalidate_loops_containing_label);
2726 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2727 loop that it is not contained within, that loop is marked invalid.
2728 If any INSN or CALL_INSN uses a label's address, then the loop containing
2729 that label is marked invalid, because it could be jumped into from
2730 anywhere.
2732 Also look for blocks of code ending in an unconditional branch that
2733 exits the loop. If such a block is surrounded by a conditional
2734 branch around the block, move the block elsewhere (see below) and
2735 invert the jump to point to the code block. This may eliminate a
2736 label in our loop and will simplify processing by both us and a
2737 possible second cse pass. */
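/* A sketch of that transformation, with invented labels:

   before:                      after:
     beq  L1      ; insn P        bne  Lnew     ; P inverted
     jmp  Lexit   ; exits loop  L1:
   L1:                            ...loop continues...
     ...loop continues...
                                ; placed after a BARRIER outside
                                ; the loop:
                                Lnew:
                                  jmp  Lexit

   so the rarely executed exit jump no longer sits inside the loop. */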
2739 for (insn = f; insn; insn = NEXT_INSN (insn))
2740 if (INSN_P (insn))
2742 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2744 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2746 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2747 if (note)
2748 invalidate_loops_containing_label (XEXP (note, 0));
2751 if (GET_CODE (insn) != JUMP_INSN)
2752 continue;
2754 mark_loop_jump (PATTERN (insn), this_loop);
2756 /* See if this is an unconditional branch outside the loop. */
2757 if (this_loop
2758 && (GET_CODE (PATTERN (insn)) == RETURN
2759 || (any_uncondjump_p (insn)
2760 && onlyjump_p (insn)
2761 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2762 != this_loop)))
2763 && get_max_uid () < max_uid_for_loop)
2765 rtx p;
2766 rtx our_next = next_real_insn (insn);
2767 rtx last_insn_to_move = NEXT_INSN (insn);
2768 struct loop *dest_loop;
2769 struct loop *outer_loop = NULL;
2771 /* Go backwards until we reach the start of the loop, a label,
2772 or a JUMP_INSN. */
2773 for (p = PREV_INSN (insn);
2774 GET_CODE (p) != CODE_LABEL
2775 && ! (GET_CODE (p) == NOTE
2776 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2777 && GET_CODE (p) != JUMP_INSN;
2778 p = PREV_INSN (p))
2781 /* Check for the case where we have a jump to an inner nested
2782 loop, and do not perform the optimization in that case. */
2784 if (JUMP_LABEL (insn))
2786 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2787 if (dest_loop)
2789 for (outer_loop = dest_loop; outer_loop;
2790 outer_loop = outer_loop->outer)
2791 if (outer_loop == this_loop)
2792 break;
2796 /* Make sure that the target of P is within the current loop. */
2798 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2799 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2800 outer_loop = this_loop;
2802 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2803 we have a block of code to try to move.
2805 We look backward and then forward from the target of INSN
2806 to find a BARRIER at the same loop depth as the target.
2807 If we find such a BARRIER, we make a new label for the start
2808 of the block, invert the jump in P and point it to that label,
2809 and move the block of code to the spot we found. */
2811 if (! outer_loop
2812 && GET_CODE (p) == JUMP_INSN
2813 && JUMP_LABEL (p) != 0
2814 /* Just ignore jumps to labels that were never emitted.
2815 These always indicate compilation errors. */
2816 && INSN_UID (JUMP_LABEL (p)) != 0
2817 && any_condjump_p (p) && onlyjump_p (p)
2818 && next_real_insn (JUMP_LABEL (p)) == our_next
2819 /* If it's not safe to move the sequence, then we
2820 mustn't try. */
2821 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2822 &last_insn_to_move))
2824 rtx target
2825 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2826 struct loop *target_loop = uid_loop[INSN_UID (target)];
2827 rtx loc, loc2;
2828 rtx tmp;
2830 /* Search for possible garbage past the conditional jumps
2831 and look for the last barrier. */
2832 for (tmp = last_insn_to_move;
2833 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2834 if (GET_CODE (tmp) == BARRIER)
2835 last_insn_to_move = tmp;
2837 for (loc = target; loc; loc = PREV_INSN (loc))
2838 if (GET_CODE (loc) == BARRIER
2839 /* Don't move things inside a tablejump. */
2840 && ((loc2 = next_nonnote_insn (loc)) == 0
2841 || GET_CODE (loc2) != CODE_LABEL
2842 || (loc2 = next_nonnote_insn (loc2)) == 0
2843 || GET_CODE (loc2) != JUMP_INSN
2844 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2845 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2846 && uid_loop[INSN_UID (loc)] == target_loop)
2847 break;
2849 if (loc == 0)
2850 for (loc = target; loc; loc = NEXT_INSN (loc))
2851 if (GET_CODE (loc) == BARRIER
2852 /* Don't move things inside a tablejump. */
2853 && ((loc2 = next_nonnote_insn (loc)) == 0
2854 || GET_CODE (loc2) != CODE_LABEL
2855 || (loc2 = next_nonnote_insn (loc2)) == 0
2856 || GET_CODE (loc2) != JUMP_INSN
2857 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2858 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2859 && uid_loop[INSN_UID (loc)] == target_loop)
2860 break;
2862 if (loc)
2864 rtx cond_label = JUMP_LABEL (p);
2865 rtx new_label = get_label_after (p);
2867 /* Ensure our label doesn't go away. */
2868 LABEL_NUSES (cond_label)++;
2870 /* Verify that uid_loop is large enough and that
2871 we can invert P. */
2872 if (invert_jump (p, new_label, 1))
2874 rtx q, r;
2876 /* If no suitable BARRIER was found, create a suitable
2877 one before TARGET. Since TARGET is a fall through
2878 path, we'll need to insert a jump around our block
2879 and add a BARRIER before TARGET.
2881 This creates an extra unconditional jump outside
2882 the loop. However, the benefits of removing rarely
2883 executed instructions from inside the loop usually
2884 outweigh the cost of the extra unconditional jump
2885 outside the loop. */
2886 if (loc == 0)
2888 rtx temp;
2890 temp = gen_jump (JUMP_LABEL (insn));
2891 temp = emit_jump_insn_before (temp, target);
2892 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2893 LABEL_NUSES (JUMP_LABEL (insn))++;
2894 loc = emit_barrier_before (target);
2897 /* Include the BARRIER after INSN and copy the
2898 block after LOC. */
2899 if (squeeze_notes (&new_label, &last_insn_to_move))
2900 abort ();
2901 reorder_insns (new_label, last_insn_to_move, loc);
2903 /* All those insns are now in TARGET_LOOP. */
2904 for (q = new_label;
2905 q != NEXT_INSN (last_insn_to_move);
2906 q = NEXT_INSN (q))
2907 uid_loop[INSN_UID (q)] = target_loop;
2909 /* The label jumped to by INSN is no longer a loop
2910 exit. Unless INSN does not have a label (e.g.,
2911 it is a RETURN insn), search loop->exit_labels
2912 to find its label_ref, and remove it. Also turn
2913 off LABEL_OUTSIDE_LOOP_P bit. */
2914 if (JUMP_LABEL (insn))
2916 for (q = 0, r = this_loop->exit_labels;
2917 r;
2918 q = r, r = LABEL_NEXTREF (r))
2919 if (XEXP (r, 0) == JUMP_LABEL (insn))
2921 LABEL_OUTSIDE_LOOP_P (r) = 0;
2922 if (q)
2923 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2924 else
2925 this_loop->exit_labels = LABEL_NEXTREF (r);
2926 break;
2929 for (loop = this_loop; loop && loop != target_loop;
2930 loop = loop->outer)
2931 loop->exit_count--;
2933 /* If we didn't find it, then something is
2934 wrong. */
2935 if (! r)
2936 abort ();
2939 /* P is now a jump outside the loop, so it must be put
2940 in loop->exit_labels, and marked as such.
2941 The easiest way to do this is to just call
2942 mark_loop_jump again for P. */
2943 mark_loop_jump (PATTERN (p), this_loop);
2945 /* If INSN now jumps to the insn after it,
2946 delete INSN. */
2947 if (JUMP_LABEL (insn) != 0
2948 && (next_real_insn (JUMP_LABEL (insn))
2949 == next_real_insn (insn)))
2950 delete_related_insns (insn);
2953 /* Continue the loop after where the conditional
2954 branch used to jump, since the only branch insn
2955 in the block (if it still remains) is an inter-loop
2956 branch and hence needs no processing. */
2957 insn = NEXT_INSN (cond_label);
2959 if (--LABEL_NUSES (cond_label) == 0)
2960 delete_related_insns (cond_label);
2962 /* This loop will be continued with NEXT_INSN (insn). */
2963 insn = PREV_INSN (insn);
2970 /* If any label in X jumps to a loop different from LOOP and from any of
2971 the loops LOOP is contained in, mark the target loop invalid.
2973 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2975 static void
2976 mark_loop_jump (x, loop)
2977 rtx x;
2978 struct loop *loop;
2980 struct loop *dest_loop;
2981 struct loop *outer_loop;
2982 int i;
2984 switch (GET_CODE (x))
2986 case PC:
2987 case USE:
2988 case CLOBBER:
2989 case REG:
2990 case MEM:
2991 case CONST_INT:
2992 case CONST_DOUBLE:
2993 case RETURN:
2994 return;
2996 case CONST:
2997 /* There could be a label reference in here. */
2998 mark_loop_jump (XEXP (x, 0), loop);
2999 return;
3001 case PLUS:
3002 case MINUS:
3003 case MULT:
3004 mark_loop_jump (XEXP (x, 0), loop);
3005 mark_loop_jump (XEXP (x, 1), loop);
3006 return;
3008 case LO_SUM:
3009 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3010 mark_loop_jump (XEXP (x, 1), loop);
3011 return;
3013 case SIGN_EXTEND:
3014 case ZERO_EXTEND:
3015 mark_loop_jump (XEXP (x, 0), loop);
3016 return;
3018 case LABEL_REF:
3019 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3021 /* Link together all labels that branch outside the loop. This
3022 is used by final_[bg]iv_value and the loop unrolling code. Also
3023 mark this LABEL_REF so we know that this branch should predict
3024 false. */
3026 /* A check to make sure the label is not in an inner nested loop,
3027 since this does not count as a loop exit. */
3028 if (dest_loop)
3030 for (outer_loop = dest_loop; outer_loop;
3031 outer_loop = outer_loop->outer)
3032 if (outer_loop == loop)
3033 break;
3035 else
3036 outer_loop = NULL;
3038 if (loop && ! outer_loop)
3040 LABEL_OUTSIDE_LOOP_P (x) = 1;
3041 LABEL_NEXTREF (x) = loop->exit_labels;
3042 loop->exit_labels = x;
3044 for (outer_loop = loop;
3045 outer_loop && outer_loop != dest_loop;
3046 outer_loop = outer_loop->outer)
3047 outer_loop->exit_count++;
3050 /* If this is inside a loop, but not in the current loop or one enclosed
3051 by it, it invalidates at least one loop. */
3053 if (! dest_loop)
3054 return;
3056 /* We must invalidate every nested loop containing the target of this
3057 label, except those that also contain the jump insn. */
3059 for (; dest_loop; dest_loop = dest_loop->outer)
3061 /* Stop when we reach a loop that also contains the jump insn. */
3062 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3063 if (dest_loop == outer_loop)
3064 return;
3066 /* If we get here, we know we need to invalidate a loop. */
3067 if (loop_dump_stream && ! dest_loop->invalid)
3068 fprintf (loop_dump_stream,
3069 "\nLoop at %d ignored due to multiple entry points.\n",
3070 INSN_UID (dest_loop->start));
3072 dest_loop->invalid = 1;
3074 return;
3076 case SET:
3077 /* If this is not setting pc, ignore. */
3078 if (SET_DEST (x) == pc_rtx)
3079 mark_loop_jump (SET_SRC (x), loop);
3080 return;
3082 case IF_THEN_ELSE:
3083 mark_loop_jump (XEXP (x, 1), loop);
3084 mark_loop_jump (XEXP (x, 2), loop);
3085 return;
3087 case PARALLEL:
3088 case ADDR_VEC:
3089 for (i = 0; i < XVECLEN (x, 0); i++)
3090 mark_loop_jump (XVECEXP (x, 0, i), loop);
3091 return;
3093 case ADDR_DIFF_VEC:
3094 for (i = 0; i < XVECLEN (x, 1); i++)
3095 mark_loop_jump (XVECEXP (x, 1, i), loop);
3096 return;
3098 default:
3099 /* Strictly speaking this is not a jump into the loop, only a possible
3100 jump out of the loop. However, we have no way to link the destination
3101 of this jump onto the list of exit labels. To be safe we mark this
3102 loop and any containing loops as invalid. */
3103 if (loop)
3105 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3107 if (loop_dump_stream && ! outer_loop->invalid)
3108 fprintf (loop_dump_stream,
3109 "\nLoop at %d ignored due to unknown exit jump.\n",
3110 INSN_UID (outer_loop->start));
3111 outer_loop->invalid = 1;
3114 return;
3118 /* Return nonzero if there is a label in the range from
3119 insn INSN to and including the insn whose luid is END.
3120 INSN must have an assigned luid (i.e., it must not have
3121 been previously created by loop.c). */
3123 static int
3124 labels_in_range_p (insn, end)
3125 rtx insn;
3126 int end;
3128 while (insn && INSN_LUID (insn) <= end)
3130 if (GET_CODE (insn) == CODE_LABEL)
3131 return 1;
3132 insn = NEXT_INSN (insn);
3135 return 0;
3138 /* Record that a memory reference X is being set. */
3140 static void
3141 note_addr_stored (x, y, data)
3142 rtx x;
3143 rtx y ATTRIBUTE_UNUSED;
3144 void *data ATTRIBUTE_UNUSED;
3146 struct loop_info *loop_info = data;
3148 if (x == 0 || GET_CODE (x) != MEM)
3149 return;
3151 /* Count number of memory writes.
3152 This affects heuristics in strength_reduce. */
3153 loop_info->num_mem_sets++;
3155 /* BLKmode MEM means all memory is clobbered. */
3156 if (GET_MODE (x) == BLKmode)
3158 if (RTX_UNCHANGING_P (x))
3159 loop_info->unknown_constant_address_altered = 1;
3160 else
3161 loop_info->unknown_address_altered = 1;
3163 return;
3166 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3167 loop_info->store_mems);
3170 /* X is a value modified by an INSN that references a biv inside a loop
3171 exit test (i.e., X is somehow related to the value of the biv). If X
3172 is a pseudo that is used more than once, then the biv is (effectively)
3173 used more than once. DATA is a pointer to a loop_regs structure. */
3175 static void
3176 note_set_pseudo_multiple_uses (x, y, data)
3177 rtx x;
3178 rtx y ATTRIBUTE_UNUSED;
3179 void *data;
3181 struct loop_regs *regs = (struct loop_regs *) data;
3183 if (x == 0)
3184 return;
3186 while (GET_CODE (x) == STRICT_LOW_PART
3187 || GET_CODE (x) == SIGN_EXTRACT
3188 || GET_CODE (x) == ZERO_EXTRACT
3189 || GET_CODE (x) == SUBREG)
3190 x = XEXP (x, 0);
3192 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3193 return;
3195 /* If we do not have usage information, or if we know the register
3196 is used more than once, note that fact for check_dbra_loop. */
3197 if (REGNO (x) >= max_reg_before_loop
3198 || ! regs->array[REGNO (x)].single_usage
3199 || regs->array[REGNO (x)].single_usage == const0_rtx)
3200 regs->multiple_uses = 1;
3203 /* Return nonzero if the rtx X is invariant over the current loop.
3205 The value is 2 if we refer to something only conditionally invariant.
3207 A memory ref is invariant if it is not volatile and does not conflict
3208 with anything stored in `loop_info->store_mems'. */
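/* Illustration (pseudo number invented): if the only set of pseudo 70
   inside the loop has left a negative set_in_loop entry, an expression
   such as (plus (reg 70) (const_int 4)) yields 2, "conditionally
   invariant", rather than 1. */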
3210 int
3211 loop_invariant_p (loop, x)
3212 const struct loop *loop;
3213 rtx x;
3215 struct loop_info *loop_info = LOOP_INFO (loop);
3216 struct loop_regs *regs = LOOP_REGS (loop);
3217 int i;
3218 enum rtx_code code;
3219 const char *fmt;
3220 int conditional = 0;
3221 rtx mem_list_entry;
3223 if (x == 0)
3224 return 1;
3225 code = GET_CODE (x);
3226 switch (code)
3228 case CONST_INT:
3229 case CONST_DOUBLE:
3230 case SYMBOL_REF:
3231 case CONST:
3232 return 1;
3234 case LABEL_REF:
3235 /* A LABEL_REF is normally invariant, however, if we are unrolling
3236 loops, and this label is inside the loop, then it isn't invariant.
3237 This is because each unrolled copy of the loop body will have
3238 a copy of this label. If this was invariant, then an insn loading
3239 the address of this label into a register might get moved outside
3240 the loop, and then each loop body would end up using the same label.
3242 We don't know the loop bounds here though, so just fail for all
3243 labels. */
3244 if (flag_unroll_loops)
3245 return 0;
3246 else
3247 return 1;
3249 case PC:
3250 case CC0:
3251 case UNSPEC_VOLATILE:
3252 return 0;
3254 case REG:
3255 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3256 since the reg might be set by initialization within the loop. */
3258 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3259 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3260 && ! current_function_has_nonlocal_goto)
3261 return 1;
3263 if (LOOP_INFO (loop)->has_call
3264 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3265 return 0;
3267 if (regs->array[REGNO (x)].set_in_loop < 0)
3268 return 2;
3270 return regs->array[REGNO (x)].set_in_loop == 0;
3272 case MEM:
3273 /* Volatile memory references must be rejected. Do this before
3274 checking for read-only items, so that volatile read-only items
3275 will be rejected also. */
3276 if (MEM_VOLATILE_P (x))
3277 return 0;
3279 /* See if there is any dependence between a store and this load. */
3280 mem_list_entry = loop_info->store_mems;
3281 while (mem_list_entry)
3283 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3284 x, rtx_varies_p))
3285 return 0;
3287 mem_list_entry = XEXP (mem_list_entry, 1);
3290 /* It's not invalidated by a store in memory
3291 but we must still verify the address is invariant. */
3292 break;
3294 case ASM_OPERANDS:
3295 /* Don't mess with insns declared volatile. */
3296 if (MEM_VOLATILE_P (x))
3297 return 0;
3298 break;
3300 default:
3301 break;
3304 fmt = GET_RTX_FORMAT (code);
3305 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3307 if (fmt[i] == 'e')
3309 int tem = loop_invariant_p (loop, XEXP (x, i));
3310 if (tem == 0)
3311 return 0;
3312 if (tem == 2)
3313 conditional = 1;
3315 else if (fmt[i] == 'E')
3317 int j;
3318 for (j = 0; j < XVECLEN (x, i); j++)
3320 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3321 if (tem == 0)
3322 return 0;
3323 if (tem == 2)
3324 conditional = 1;
3330 return 1 + conditional;
3333 /* Return nonzero if all the insns in the loop that set REG
3334 are INSN and the immediately following insns,
3335 and if each of those insns sets REG in an invariant way
3336 (not counting uses of REG in them).
3338 The value is 2 if some of these insns are only conditionally invariant.
3340 We assume that INSN itself is the first set of REG
3341 and that its source is invariant. */
3343 static int
3344 consec_sets_invariant_p (loop, reg, n_sets, insn)
3345 const struct loop *loop;
3346 int n_sets;
3347 rtx reg, insn;
3349 struct loop_regs *regs = LOOP_REGS (loop);
3350 rtx p = insn;
3351 unsigned int regno = REGNO (reg);
3352 rtx temp;
3353 /* Number of sets we have to insist on finding after INSN. */
3354 int count = n_sets - 1;
3355 int old = regs->array[regno].set_in_loop;
3356 int value = 0;
3357 int this;
3359 /* If N_SETS hit the limit, we can't rely on its value. */
3360 if (n_sets == 127)
3361 return 0;
3363 regs->array[regno].set_in_loop = 0;
3365 while (count > 0)
3367 enum rtx_code code;
3368 rtx set;
3370 p = NEXT_INSN (p);
3371 code = GET_CODE (p);
3373 /* If library call, skip to end of it. */
3374 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3375 p = XEXP (temp, 0);
3377 this = 0;
3378 if (code == INSN
3379 && (set = single_set (p))
3380 && GET_CODE (SET_DEST (set)) == REG
3381 && REGNO (SET_DEST (set)) == regno)
3383 this = loop_invariant_p (loop, SET_SRC (set));
3384 if (this != 0)
3385 value |= this;
3386 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3388 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3389 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3390 notes are OK. */
3391 this = (CONSTANT_P (XEXP (temp, 0))
3392 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3393 && loop_invariant_p (loop, XEXP (temp, 0))));
3394 if (this != 0)
3395 value |= this;
3398 if (this != 0)
3399 count--;
3400 else if (code != NOTE)
3402 regs->array[regno].set_in_loop = old;
3403 return 0;
3407 regs->array[regno].set_in_loop = old;
3408 /* If loop_invariant_p ever returned 2, we return 2. */
3409 return 1 + (value & 2);
3412 #if 0
3413 /* I don't think this condition is sufficient to allow INSN
3414 to be moved, so we no longer test it. */
3416 /* Return 1 if all insns in the basic block of INSN and following INSN
3417 that set REG are invariant according to TABLE. */
3419 static int
3420 all_sets_invariant_p (reg, insn, table)
3421 rtx reg, insn;
3422 short *table;
3424 rtx p = insn;
3425 int regno = REGNO (reg);
3427 while (1)
3429 enum rtx_code code;
3430 p = NEXT_INSN (p);
3431 code = GET_CODE (p);
3432 if (code == CODE_LABEL || code == JUMP_INSN)
3433 return 1;
3434 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3435 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3436 && REGNO (SET_DEST (PATTERN (p))) == regno)
3438 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3439 return 0;
3443 #endif /* 0 */
3445 /* Look at all uses (not sets) of registers in X. For each, if it is
3446 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3447 a different insn, set USAGE[REGNO] to const0_rtx. */
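/* For example (insn uids invented): a first use of (reg 65) in insn 12
   records insn 12 as the single usage; a later use in insn 20 demotes
   the entry to const0_rtx, meaning "used in more than one insn". */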
3449 static void
3450 find_single_use_in_loop (regs, insn, x)
3451 struct loop_regs *regs;
3452 rtx insn;
3453 rtx x;
3455 enum rtx_code code = GET_CODE (x);
3456 const char *fmt = GET_RTX_FORMAT (code);
3457 int i, j;
3459 if (code == REG)
3460 regs->array[REGNO (x)].single_usage
3461 = (regs->array[REGNO (x)].single_usage != 0
3462 && regs->array[REGNO (x)].single_usage != insn)
3463 ? const0_rtx : insn;
3465 else if (code == SET)
3467 /* Don't count SET_DEST if it is a REG; otherwise count things
3468 in SET_DEST because if a register is partially modified, it won't
3469 show up as a potential movable so we don't care how USAGE is set
3470 for it. */
3471 if (GET_CODE (SET_DEST (x)) != REG)
3472 find_single_use_in_loop (regs, insn, SET_DEST (x));
3473 find_single_use_in_loop (regs, insn, SET_SRC (x));
3475 else
3476 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3478 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3479 find_single_use_in_loop (regs, insn, XEXP (x, i));
3480 else if (fmt[i] == 'E')
3481 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3482 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3486 /* Count and record any set in X which is contained in INSN. Update
3487 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3488 in X. */
3490 static void
3491 count_one_set (regs, insn, x, last_set)
3492 struct loop_regs *regs;
3493 rtx insn, x;
3494 rtx *last_set;
3496 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3497 /* Don't move a reg that has an explicit clobber.
3498 It's not worth the pain to try to do it correctly. */
3499 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3501 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3503 rtx dest = SET_DEST (x);
3504 while (GET_CODE (dest) == SUBREG
3505 || GET_CODE (dest) == ZERO_EXTRACT
3506 || GET_CODE (dest) == SIGN_EXTRACT
3507 || GET_CODE (dest) == STRICT_LOW_PART)
3508 dest = XEXP (dest, 0);
3509 if (GET_CODE (dest) == REG)
3511 int i;
3512 int regno = REGNO (dest);
3513 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3515 /* If this is the first setting of this reg
3516 in current basic block, and it was set before,
3517 it must be set in two basic blocks, so it cannot
3518 be moved out of the loop. */
3519 if (regs->array[regno].set_in_loop > 0
3520 && last_set[regno] == 0)
3521 regs->array[regno+i].may_not_optimize = 1;
3522 /* If this is not first setting in current basic block,
3523 see if reg was used in between previous one and this.
3524 If so, neither one can be moved. */
3525 if (last_set[regno] != 0
3526 && reg_used_between_p (dest, last_set[regno], insn))
3527 regs->array[regno+i].may_not_optimize = 1;
3528 if (regs->array[regno+i].set_in_loop < 127)
3529 ++regs->array[regno+i].set_in_loop;
3530 last_set[regno+i] = insn;
3536 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3537 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3538 contained in insn INSN is used by any insn that precedes INSN in
3539 cyclic order starting from the loop entry point.
3541 We don't want to use INSN_LUID here because if we restrict INSN to those
3542 that have a valid INSN_LUID, it means we cannot move an invariant out
3543 from an inner loop past two loops. */
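/* For instance, if the loop is entered at SCAN_START in the middle of
   its body, an insn textually before SCAN_START executes *after* it on
   every iteration; hence the cyclic walk below rather than a luid
   comparison. */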
3545 static int
3546 loop_reg_used_before_p (loop, set, insn)
3547 const struct loop *loop;
3548 rtx set, insn;
3550 rtx reg = SET_DEST (set);
3551 rtx p;
3553 /* Scan forward checking for register usage. If we hit INSN, we
3554 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3555 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3557 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3558 return 1;
3560 if (p == loop->end)
3561 p = loop->start;
3564 return 0;
3568 /* Information we collect about arrays that we might want to prefetch. */
3569 struct prefetch_info
3571 struct iv_class *class; /* Class this prefetch is based on. */
3572 struct induction *giv; /* GIV this prefetch is based on. */
3573 rtx base_address; /* Start prefetching from this address plus
3574 index. */
3575 HOST_WIDE_INT index;
3576 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3577 iteration. */
3578 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3579 prefetch area in one iteration. */
3580 unsigned int total_bytes; /* Total bytes loop will access in this block.
3581 This is set only for loops with known
3582 iteration counts and is 0xffffffff
3583 otherwise. */
3584 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3585 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3586 unsigned int write : 1; /* 1 for read/write prefetches. */
3589 /* Data used by check_store function. */
3590 struct check_store_data
3592 rtx mem_address;
3593 int mem_write;
3596 static void check_store PARAMS ((rtx, rtx, void *));
3597 static void emit_prefetch_instructions PARAMS ((struct loop *));
3598 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3600 /* Set mem_write when mem_address is found. Used as a callback to
3601 note_stores. */
3602 static void
3603 check_store (x, pat, data)
3604 rtx x, pat ATTRIBUTE_UNUSED;
3605 void *data;
3607 struct check_store_data *d = (struct check_store_data *) data;
3609 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3610 d->mem_write = 1;
3613 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3614 important to get some addresses combined. Later more sophisticated
3615 transformations can be added when necessary.
3617 ??? The same trick of swapping operands is used in several other places.
3618 It would be nice to develop some common way to handle this. */
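/* So, for example, (plus (reg 60) (const_int 4)) is treated as equal
   to (plus (const_int 4) (reg 60)) here, a pair that plain rtx_equal_p
   would reject. (Operands invented for illustration.) */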
3620 static int
3621 rtx_equal_for_prefetch_p (x, y)
3622 rtx x, y;
3624 int i;
3625 int j;
3626 enum rtx_code code = GET_CODE (x);
3627 const char *fmt;
3629 if (x == y)
3630 return 1;
3631 if (code != GET_CODE (y))
3632 return 0;
3634 code = GET_CODE (x);
3636 if (GET_RTX_CLASS (code) == 'c')
3638 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3639 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3640 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3641 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3643 /* Compare the elements. If any pair of corresponding elements fails to
3644 match, return 0 for the whole thing. */
3646 fmt = GET_RTX_FORMAT (code);
3647 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3649 switch (fmt[i])
3651 case 'w':
3652 if (XWINT (x, i) != XWINT (y, i))
3653 return 0;
3654 break;
3656 case 'i':
3657 if (XINT (x, i) != XINT (y, i))
3658 return 0;
3659 break;
3661 case 'E':
3662 /* Two vectors must have the same length. */
3663 if (XVECLEN (x, i) != XVECLEN (y, i))
3664 return 0;
3666 /* And the corresponding elements must match. */
3667 for (j = 0; j < XVECLEN (x, i); j++)
3668 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3669 XVECEXP (y, i, j)) == 0)
3670 return 0;
3671 break;
3673 case 'e':
3674 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3675 return 0;
3676 break;
3678 case 's':
3679 if (strcmp (XSTR (x, i), XSTR (y, i)))
3680 return 0;
3681 break;
3683 case 'u':
3684 /* These are just backpointers, so they don't matter. */
3685 break;
3687 case '0':
3688 break;
3690 /* It is believed that rtx's at this level will never
3691 contain anything but integers and other rtx's,
3692 except for within LABEL_REFs and SYMBOL_REFs. */
3693 default:
3694 abort ();
3697 return 1;
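The commutative case above is the crux of this helper: PLUS and MULT operands may arrive in either order after earlier passes. A minimal standalone sketch of the same idea, using a hypothetical toy expression type rather than real RTL (all names here are invented for illustration):

#include <stdio.h>

/* Toy expression: op == 0 means leaf; otherwise a binary commutative op.  */
struct expr { int op; int leaf; struct expr *l, *r; };

static int
eq_commutative (const struct expr *x, const struct expr *y)
{
  if (x == y)
    return 1;
  if (!x || !y || x->op != y->op)
    return 0;
  if (x->op == 0)
    return x->leaf == y->leaf;
  /* Try the operands in both orders, as the real function does for
     codes in rtx class 'c'.  */
  return ((eq_commutative (x->l, y->l) && eq_commutative (x->r, y->r))
	  || (eq_commutative (x->l, y->r) && eq_commutative (x->r, y->l)));
}

int
main (void)
{
  struct expr a = { 0, 1, 0, 0 }, b = { 0, 2, 0, 0 };
  struct expr p = { '+', 0, &a, &b }, q = { '+', 0, &b, &a };
  printf ("%d\n", eq_commutative (&p, &q));	/* prints 1 */
  return 0;
}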
3700 /* Remove constant addition value from the expression X (when present)
3701 and return it. */
3703 static HOST_WIDE_INT
3704 remove_constant_addition (x)
3705 rtx *x;
3707 HOST_WIDE_INT addval = 0;
3708 rtx exp = *x;
3710 /* Avoid clobbering a shared CONST expression. */
3711 if (GET_CODE (exp) == CONST)
3713 if (GET_CODE (XEXP (exp, 0)) == PLUS
3714 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3715 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3717 *x = XEXP (XEXP (exp, 0), 0);
3718 return INTVAL (XEXP (XEXP (exp, 0), 1));
3720 return 0;
3723 if (GET_CODE (exp) == CONST_INT)
3725 addval = INTVAL (exp);
3726 *x = const0_rtx;
3729 /* For a PLUS expression, recurse on both operands. */
3730 else if (GET_CODE (exp) == PLUS)
3732 addval += remove_constant_addition (&XEXP (exp, 0));
3733 addval += remove_constant_addition (&XEXP (exp, 1));
3735 /* If either operand was constant, the recursion replaced it with
3736 const0_rtx; remove the extra zero from the expression. */
3737 if (XEXP (exp, 0) == const0_rtx)
3738 *x = XEXP (exp, 1);
3739 else if (XEXP (exp, 1) == const0_rtx)
3740 *x = XEXP (exp, 0);
3743 return addval;
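The same splitting can be shown on a toy PLUS tree. A hedged standalone sketch (hypothetical types; the real code works on shared RTL and must therefore special-case CONST):

#include <stdio.h>

/* Toy node: constant leaves carry VAL; an interior node is a PLUS.  */
struct node { int is_const; long val; struct node *l, *r; };

/* Strip constant addends out of *X and return their sum, leaving the
   non-constant part behind, in the spirit of remove_constant_addition.  */
static long
strip_constants (struct node **x)
{
  struct node *n = *x;
  long addval = 0;

  if (n->is_const)
    {
      addval = n->val;
      n->val = 0;		/* leave a zero behind */
    }
  else if (n->l && n->r)
    {
      addval += strip_constants (&n->l);
      addval += strip_constants (&n->r);
      /* If one side became a bare zero constant, drop it.  */
      if (n->l->is_const && n->l->val == 0)
	*x = n->r;
      else if (n->r->is_const && n->r->val == 0)
	*x = n->l;
    }
  return addval;
}

int
main (void)
{
  struct node sym = { 0, 0, 0, 0 };		/* stands for a SYMBOL_REF */
  struct node four = { 1, 4, 0, 0 }, eight = { 1, 8, 0, 0 };
  struct node inner = { 0, 0, &sym, &four };	/* (sym + 4) */
  struct node outer = { 0, 0, &inner, &eight };	/* (sym + 4) + 8 */
  struct node *x = &outer;
  printf ("constant part: %ld\n", strip_constants (&x));  /* 12 */
  printf ("rest is sym: %d\n", x == &sym);		   /* 1 */
  return 0;
}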
3746 /* Attempt to identify accesses to arrays that are most likely to cause cache
3747 misses, and emit prefetch instructions a few prefetch blocks forward.
3749 To detect the arrays we use the GIV information that was collected by the
3750 strength reduction pass.
3752 The prefetch instructions are generated after the GIV information is done
3753 and before the strength reduction process. The new GIVs are injected into
3754 the strength reduction tables, so the prefetch addresses are optimized as
3755 well.
3757 GIVs are split into base address, stride, and constant addition values.
3758 GIVs with the same address, stride and close addition values are combined
3759 into a single prefetch. Writes to GIVs are also detected, so that a
3760 prefetch for write instructions can be used for the block we write to, on
3761 machines that support write prefetches.
3763 Several heuristics are used to determine when to prefetch. They are
3764 controlled by defined symbols that can be overridden for each target. */
3766 static void
3767 emit_prefetch_instructions (loop)
3768 struct loop *loop;
3770 int num_prefetches = 0;
3771 int num_real_prefetches = 0;
3772 int num_real_write_prefetches = 0;
3773 int num_prefetches_before = 0;
3774 int num_write_prefetches_before = 0;
3775 int ahead = 0;
3776 int i;
3777 struct iv_class *bl;
3778 struct induction *iv;
3779 struct prefetch_info info[MAX_PREFETCHES];
3780 struct loop_ivs *ivs = LOOP_IVS (loop);
3782 if (!HAVE_prefetch)
3783 return;
3785 /* Consider only loops without calls. When a call is made, the loop is
3786 probably slow enough already that prefetching would gain little. */
3787 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3789 if (loop_dump_stream)
3790 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3792 return;
3795 /* Don't prefetch in loops known to have few iterations. */
3796 if (PREFETCH_NO_LOW_LOOPCNT
3797 && LOOP_INFO (loop)->n_iterations
3798 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3800 if (loop_dump_stream)
3801 fprintf (loop_dump_stream,
3802 "Prefetch: ignoring loop: not enough iterations.\n");
3803 return;
3806 /* Search all induction variables and pick those interesting for the prefetch
3807 machinery. */
3808 for (bl = ivs->list; bl; bl = bl->next)
3810 struct induction *biv = bl->biv, *biv1;
3811 int basestride = 0;
3813 biv1 = biv;
3815 /* Expect all BIVs to be executed in each iteration. This makes our
3816 analysis more conservative. */
3817 while (biv1)
3819 /* Discard non-constant additions that we can't handle well yet, and
3820 BIVs that are executed multiple times; such BIVs ought to be
3821 handled in the nested loop. We accept not_every_iteration BIVs,
3822 since these only result in larger strides and make our
3823 heuristics more conservative. */
3824 if (GET_CODE (biv->add_val) != CONST_INT)
3826 if (loop_dump_stream)
3828 fprintf (loop_dump_stream,
3829 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3830 REGNO (biv->src_reg), INSN_UID (biv->insn));
3831 print_rtl (loop_dump_stream, biv->add_val);
3832 fprintf (loop_dump_stream, "\n");
3834 break;
3837 if (biv->maybe_multiple)
3839 if (loop_dump_stream)
3841 fprintf (loop_dump_stream,
3842 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3843 REGNO (biv->src_reg), INSN_UID (biv->insn));
3844 print_rtl (loop_dump_stream, biv->add_val);
3845 fprintf (loop_dump_stream, "\n");
3847 break;
3850 basestride += INTVAL (biv1->add_val);
3851 biv1 = biv1->next_iv;
3854 if (biv1 || !basestride)
3855 continue;
3857 for (iv = bl->giv; iv; iv = iv->next_iv)
3859 rtx address;
3860 rtx temp;
3861 HOST_WIDE_INT index = 0;
3862 int add = 1;
3863 HOST_WIDE_INT stride = 0;
3864 int stride_sign = 1;
3865 struct check_store_data d;
3866 const char *ignore_reason = NULL;
3867 int size = GET_MODE_SIZE (GET_MODE (iv));
3869 /* See whether an induction variable is interesting to us and if
3870 not, report the reason. */
3871 if (iv->giv_type != DEST_ADDR)
3872 ignore_reason = "giv is not a destination address";
3874 /* We are interested only in constant stride memory references
3875 in order to be able to compute density easily. */
3876 else if (GET_CODE (iv->mult_val) != CONST_INT)
3877 ignore_reason = "stride is not constant";
3879 else
3881 stride = INTVAL (iv->mult_val) * basestride;
3882 if (stride < 0)
3884 stride = -stride;
3885 stride_sign = -1;
3888 /* On some targets, reversed order prefetches are not
3889 worthwhile. */
3890 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3891 ignore_reason = "reversed order stride";
3893 /* Prefetch of accesses with an extreme stride might not be
3894 worthwhile, either. */
3895 else if (PREFETCH_NO_EXTREME_STRIDE
3896 && stride > PREFETCH_EXTREME_STRIDE)
3897 ignore_reason = "extreme stride";
3899 /* Ignore GIVs with varying add values; we can't predict the
3900 value for the next iteration. */
3901 else if (!loop_invariant_p (loop, iv->add_val))
3902 ignore_reason = "giv has varying add value";
3904 /* Ignore GIVs in the nested loops; they ought to have been
3905 handled already. */
3906 else if (iv->maybe_multiple)
3907 ignore_reason = "giv is in nested loop";
3910 if (ignore_reason != NULL)
3912 if (loop_dump_stream)
3913 fprintf (loop_dump_stream,
3914 "Prefetch: ignoring giv at %d: %s.\n",
3915 INSN_UID (iv->insn), ignore_reason);
3916 continue;
3919 /* Determine the pointer to the basic array we are examining. It is
3920 the sum of the BIV's initial value and the GIV's add_val. */
3921 address = copy_rtx (iv->add_val);
3922 temp = copy_rtx (bl->initial_value);
3924 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3925 index = remove_constant_addition (&address);
3927 d.mem_write = 0;
3928 d.mem_address = *iv->location;
3930 /* When the GIV is not always executed, we might be better off by
3931 not dirtying the cache pages. */
3932 if (PREFETCH_CONDITIONAL || iv->always_executed)
3933 note_stores (PATTERN (iv->insn), check_store, &d);
3934 else
3936 if (loop_dump_stream)
3937 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3938 INSN_UID (iv->insn), "in conditional code.");
3939 continue;
3942 /* Attempt to find another prefetch to the same array and see if we
3943 can merge this one. */
3944 for (i = 0; i < num_prefetches; i++)
3945 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3946 && stride == info[i].stride)
3948 /* If both access the same array (the same location, just with a
3949 small difference in constant indexes), merge the prefetches.
3950 Just issue the later one; the earlier one will be prefetched
3951 by the previous iteration.
3952 The artificial threshold should not be too small,
3953 but also not bigger than the small portion of memory usually
3954 traversed by a single loop. */
3955 if (index >= info[i].index
3956 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3958 info[i].write |= d.mem_write;
3959 info[i].bytes_accessed += size;
3960 info[i].index = index;
3961 info[i].giv = iv;
3962 info[i].class = bl;
3963 info[i].base_address = address;
3964 add = 0;
3965 break;
3968 if (index < info[i].index
3969 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3971 info[i].write |= d.mem_write;
3972 info[i].bytes_accessed += size;
3973 add = 0;
3974 break;
3978 /* Merging failed. */
3979 if (add)
3981 info[num_prefetches].giv = iv;
3982 info[num_prefetches].class = bl;
3983 info[num_prefetches].index = index;
3984 info[num_prefetches].stride = stride;
3985 info[num_prefetches].base_address = address;
3986 info[num_prefetches].write = d.mem_write;
3987 info[num_prefetches].bytes_accessed = size;
3988 num_prefetches++;
3989 if (num_prefetches >= MAX_PREFETCHES)
3991 if (loop_dump_stream)
3992 fprintf (loop_dump_stream,
3993 "Maximal number of prefetches exceeded.\n");
3994 return;
4000 for (i = 0; i < num_prefetches; i++)
4002 int density;
4004 /* Attempt to calculate the total number of bytes fetched by all
4005 iterations of the loop. Avoid overflow. */
4006 if (LOOP_INFO (loop)->n_iterations
4007 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4008 >= LOOP_INFO (loop)->n_iterations))
4009 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4010 else
4011 info[i].total_bytes = 0xffffffff;
4013 density = info[i].bytes_accessed * 100 / info[i].stride;
4015 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4016 if (PREFETCH_ONLY_DENSE_MEM)
4017 if (density * 256 > PREFETCH_DENSE_MEM * 100
4018 && (info[i].total_bytes / PREFETCH_BLOCK
4019 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4021 info[i].prefetch_before_loop = 1;
4022 info[i].prefetch_in_loop
4023 = (info[i].total_bytes / PREFETCH_BLOCK
4024 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4026 else
4028 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4029 if (loop_dump_stream)
4030 fprintf (loop_dump_stream,
4031 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4032 INSN_UID (info[i].giv->insn), density);
4034 else
4035 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4037 /* Find how many prefetch instructions we'll use within the loop. */
4038 if (info[i].prefetch_in_loop != 0)
4040 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4041 / PREFETCH_BLOCK);
4042 num_real_prefetches += info[i].prefetch_in_loop;
4043 if (info[i].write)
4044 num_real_write_prefetches += info[i].prefetch_in_loop;
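The density test above reduces to plain integer arithmetic. A small self-contained sketch with made-up numbers (the thresholds echo the defaults defined at the top of this file; the PREFETCH_DENSE_MEM-style cutoff here is an assumption, not a measured value):

#include <stdio.h>

int
main (void)
{
  /* Illustrative values only.  */
  unsigned stride = 16;		  /* bytes advanced per iteration */
  unsigned bytes_accessed = 8;	  /* bytes actually touched per iteration */
  unsigned n_iterations = 1000;
  unsigned prefetch_block = 32;	  /* PREFETCH_BLOCK default */
  unsigned dense_mem = 220;	  /* assumed density cutoff, scaled by 256 */

  /* Guard the multiply against overflow, as the pass does.  */
  unsigned total_bytes = (0xffffffff / stride >= n_iterations
			  ? stride * n_iterations : 0xffffffff);

  int density = bytes_accessed * 100 / stride;	/* percent of each stride */

  /* Worth prefetching only if accesses are dense enough and the loop
     touches at least a couple of blocks.  */
  int worthwhile = (density * 256 > dense_mem * 100
		    && total_bytes / prefetch_block >= 2);

  printf ("density %d%%, total %u bytes, worthwhile: %d\n",
	  density, total_bytes, worthwhile);
  return 0;
}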
4048 /* Determine how many iterations ahead to prefetch within the loop, based
4049 on how many prefetches we currently expect to do within the loop. */
4050 if (num_real_prefetches != 0)
4052 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4054 if (loop_dump_stream)
4055 fprintf (loop_dump_stream,
4056 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4057 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4058 num_real_prefetches = 0, num_real_write_prefetches = 0;
4061 /* We'll also use AHEAD to determine how many prefetch instructions to
4062 emit before a loop, so don't leave it zero. */
4063 if (ahead == 0)
4064 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
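In the same spirit, the AHEAD computation is a division with a fallback. A compressed sketch with assumed values (the real code also emits a dump message when it gives up on in-loop prefetching):

#include <stdio.h>

int
main (void)
{
  int simultaneous_prefetches = 3;  /* SIMULTANEOUS_PREFETCHES default */
  int num_real_prefetches = 5;	    /* prefetch insns wanted in the body */
  int before_loop_max = 6;	    /* PREFETCH_BLOCKS_BEFORE_LOOP_MAX */
  int ahead = 0;

  if (num_real_prefetches != 0
      && (ahead = simultaneous_prefetches / num_real_prefetches) == 0)
    num_real_prefetches = 0;	/* too many streams: give up in the loop */

  /* AHEAD still sizes the before-loop prefetches, so keep it non-zero.  */
  if (ahead == 0)
    ahead = before_loop_max;

  printf ("ahead = %d, in-loop prefetches = %d\n",
	  ahead, num_real_prefetches);	/* ahead = 6, in-loop = 0 */
  return 0;
}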
4066 for (i = 0; i < num_prefetches; i++)
4068 /* Update if we've decided not to prefetch anything within the loop. */
4069 if (num_real_prefetches == 0)
4070 info[i].prefetch_in_loop = 0;
4072 /* Find how many prefetch instructions we'll use before the loop. */
4073 if (info[i].prefetch_before_loop != 0)
4075 int n = info[i].total_bytes / PREFETCH_BLOCK;
4076 if (n > ahead)
4077 n = ahead;
4078 info[i].prefetch_before_loop = n;
4079 num_prefetches_before += n;
4080 if (info[i].write)
4081 num_write_prefetches_before += n;
4084 if (loop_dump_stream)
4086 if (info[i].prefetch_in_loop == 0
4087 && info[i].prefetch_before_loop == 0)
4088 continue;
4089 fprintf (loop_dump_stream, "Prefetch insn: %d",
4090 INSN_UID (info[i].giv->insn));
4091 fprintf (loop_dump_stream,
4092 "; in loop: %d; before: %d; %s\n",
4093 info[i].prefetch_in_loop,
4094 info[i].prefetch_before_loop,
4095 info[i].write ? "read/write" : "read only");
4096 fprintf (loop_dump_stream,
4097 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4098 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4099 info[i].bytes_accessed, info[i].total_bytes);
4100 fprintf (loop_dump_stream, " index: ");
4101 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4102 fprintf (loop_dump_stream, "; stride: ");
4103 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4104 fprintf (loop_dump_stream, "; address: ");
4105 print_rtl (loop_dump_stream, info[i].base_address);
4106 fprintf (loop_dump_stream, "\n");
4110 if (num_real_prefetches + num_prefetches_before > 0)
4112 /* Record that this loop uses prefetch instructions. */
4113 LOOP_INFO (loop)->has_prefetch = 1;
4115 if (loop_dump_stream)
4117 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4118 num_real_prefetches, num_real_write_prefetches);
4119 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4120 num_prefetches_before, num_write_prefetches_before);
4124 for (i = 0; i < num_prefetches; i++)
4126 int y;
4128 for (y = 0; y < info[i].prefetch_in_loop; y++)
4130 rtx loc = copy_rtx (*info[i].giv->location);
4131 rtx insn;
4132 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4133 rtx before_insn = info[i].giv->insn;
4134 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4135 rtx seq;
4137 /* We can save some effort by offsetting the address on
4138 architectures with offsettable memory references. */
4139 if (offsettable_address_p (0, VOIDmode, loc))
4140 loc = plus_constant (loc, bytes_ahead);
4141 else
4143 rtx reg = gen_reg_rtx (Pmode);
4144 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4145 GEN_INT (bytes_ahead), reg,
4146 0, before_insn);
4147 loc = reg;
4150 start_sequence ();
4151 /* Make sure the address operand is valid for prefetch. */
4152 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4153 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4154 loc = force_reg (Pmode, loc);
4155 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4156 GEN_INT (3)));
4157 seq = gen_sequence ();
4158 end_sequence ();
4159 emit_insn_before (seq, before_insn);
4161 /* Check all insns emitted and record the new GIV
4162 information. */
4163 insn = NEXT_INSN (prev_insn);
4164 while (insn != before_insn)
4166 insn = check_insn_for_givs (loop, insn,
4167 info[i].giv->always_executed,
4168 info[i].giv->maybe_multiple);
4169 insn = NEXT_INSN (insn);
4173 if (PREFETCH_BEFORE_LOOP)
4175 /* Emit insns before the loop to fetch the first cache lines or,
4176 if we're not prefetching within the loop, everything we expect
4177 to need. */
4178 for (y = 0; y < info[i].prefetch_before_loop; y++)
4180 rtx reg = gen_reg_rtx (Pmode);
4181 rtx loop_start = loop->start;
4182 rtx init_val = info[i].class->initial_value;
4183 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4184 info[i].giv->add_val,
4185 GEN_INT (y * PREFETCH_BLOCK));
4187 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4188 non-constant INIT_VAL to have the same mode as REG, which
4189 in this case we know to be Pmode. */
4190 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4191 init_val = convert_to_mode (Pmode, init_val, 0);
4192 loop_iv_add_mult_emit_before (loop, init_val,
4193 info[i].giv->mult_val,
4194 add_val, reg, 0, loop_start);
4195 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4196 GEN_INT (3)),
4197 loop_start);
4202 return;
4205 /* A "basic induction variable" or biv is a pseudo reg that is set
4206 (within this loop) only by incrementing or decrementing it. */
4207 /* A "general induction variable" or giv is a pseudo reg whose
4208 value is a linear function of a biv. */
4210 /* Bivs are recognized by `basic_induction_var';
4211 Givs by `general_induction_var'. */
4213 /* Communication with routines called via `note_stores'. */
4215 static rtx note_insn;
4217 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4219 static rtx addr_placeholder;
4221 /* ??? Unfinished optimizations, and possible future optimizations,
4222 for the strength reduction code. */
4224 /* ??? The interaction of biv elimination, and recognition of 'constant'
4225 bivs, may cause problems. */
4227 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4228 performance problems.
4230 Perhaps don't eliminate things that can be combined with an addressing
4231 mode. Find all givs that have the same biv, mult_val, and add_val;
4232 then for each giv, check to see if its only use dies in a following
4233 memory address. If so, generate a new memory address and check to see
4234 if it is valid. If it is valid, then store the modified memory address,
4235 otherwise, mark the giv as not done so that it will get its own iv. */
4237 /* ??? Could try to optimize branches when it is known that a biv is always
4238 positive. */
4240 /* ??? When replacing a biv in a compare insn, we should replace it with
4241 the closest giv so that an optimized branch can still be recognized
4242 by the combiner, e.g. the VAX acb insn. */
4244 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4245 was rerun in loop_optimize whenever a register was added or moved.
4246 Also, some of the optimizations could be a little less conservative. */
4248 /* Scan the loop body and call FNCALL for each insn. In addition to the
4249 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4250 the callback.
4252 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least
4253 once for every loop iteration except for the last one.
4255 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once
4256 for every loop iteration. */
4258 void
4259 for_each_insn_in_loop (loop, fncall)
4260 struct loop *loop;
4261 loop_insn_callback fncall;
4263 /* This is 1 if the current insn is not executed at least once for every
4264 loop iteration. */
4265 int not_every_iteration = 0;
4266 int maybe_multiple = 0;
4267 int past_loop_latch = 0;
4268 int loop_depth = 0;
4269 rtx p;
4271 /* If loop_scan_start points to the loop exit test, we have to be wary of
4272 subversive use of gotos inside expression statements. */
4273 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4274 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4276 /* Scan through loop to find all possible bivs. */
4278 for (p = next_insn_in_loop (loop, loop->scan_start);
4279 p != NULL_RTX;
4280 p = next_insn_in_loop (loop, p))
4282 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4284 /* Past CODE_LABEL, we get to insns that may be executed multiple
4285 times. The only way we can be sure that they can't is if every
4286 jump insn between here and the end of the loop either
4287 returns, exits the loop, is a jump to a location that is still
4288 behind the label, or is a jump to the loop start. */
4290 if (GET_CODE (p) == CODE_LABEL)
4292 rtx insn = p;
4294 maybe_multiple = 0;
4296 while (1)
4298 insn = NEXT_INSN (insn);
4299 if (insn == loop->scan_start)
4300 break;
4301 if (insn == loop->end)
4303 if (loop->top != 0)
4304 insn = loop->top;
4305 else
4306 break;
4307 if (insn == loop->scan_start)
4308 break;
4311 if (GET_CODE (insn) == JUMP_INSN
4312 && GET_CODE (PATTERN (insn)) != RETURN
4313 && (!any_condjump_p (insn)
4314 || (JUMP_LABEL (insn) != 0
4315 && JUMP_LABEL (insn) != loop->scan_start
4316 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4318 maybe_multiple = 1;
4319 break;
4324 /* Past a jump, we get to insns for which we can't count
4325 on whether they will be executed during each iteration. */
4326 /* This code appears twice in strength_reduce. There is also similar
4327 code in scan_loop. */
4328 if (GET_CODE (p) == JUMP_INSN
4329 /* If we enter the loop in the middle, and scan around to the
4330 beginning, don't set not_every_iteration for that.
4331 This can be any kind of jump, since we want to know if insns
4332 will be executed if the loop is executed. */
4333 && !(JUMP_LABEL (p) == loop->top
4334 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4335 && any_uncondjump_p (p))
4336 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4338 rtx label = 0;
4340 /* If this is a jump outside the loop, then it also doesn't
4341 matter. Check to see if the target of this branch is on the
4342 loop->exit_labels list. */
4344 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4345 if (XEXP (label, 0) == JUMP_LABEL (p))
4346 break;
4348 if (!label)
4349 not_every_iteration = 1;
4352 else if (GET_CODE (p) == NOTE)
4354 /* At the virtual top of a converted loop, insns are again known to
4355 be executed each iteration: logically, the loop begins here
4356 even though the exit code has been duplicated.
4358 Insns are also again known to be executed each iteration at
4359 the LOOP_CONT note. */
4360 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4361 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4362 && loop_depth == 0)
4363 not_every_iteration = 0;
4364 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4365 loop_depth++;
4366 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4367 loop_depth--;
4370 /* Note if we pass a loop latch. If we do, then we cannot clear
4371 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4372 a loop, since a jump before the last CODE_LABEL may have started
4373 a new loop iteration.
4375 Note that LOOP_TOP is only set for rotated loops and we need
4376 this check for all loops, so compare against the CODE_LABEL
4377 which immediately follows LOOP_START. */
4378 if (GET_CODE (p) == JUMP_INSN
4379 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4380 past_loop_latch = 1;
4382 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4383 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4384 or not an insn is known to be executed each iteration of the
4385 loop, whether or not any iterations are known to occur.
4387 Therefore, if we have just passed a label and have no more labels
4388 between here and the test insn of the loop, and we have not passed
4389 a jump to the top of the loop, then we know these insns will be
4390 executed each iteration. */
4392 if (not_every_iteration
4393 && !past_loop_latch
4394 && GET_CODE (p) == CODE_LABEL
4395 && no_labels_between_p (p, loop->end)
4396 && loop_insn_first_p (p, loop->cont))
4397 not_every_iteration = 0;
4401 static void
4402 loop_bivs_find (loop)
4403 struct loop *loop;
4405 struct loop_regs *regs = LOOP_REGS (loop);
4406 struct loop_ivs *ivs = LOOP_IVS (loop);
4407 /* Temporary list pointers for traversing ivs->list. */
4408 struct iv_class *bl, **backbl;
4410 ivs->list = 0;
4412 for_each_insn_in_loop (loop, check_insn_for_bivs);
4414 /* Scan ivs->list to remove all regs that proved not to be bivs.
4415 Make a sanity check against regs->n_times_set. */
4416 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4418 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4419 /* Above happens if register modified by subreg, etc. */
4420 /* Make sure it is not recognized as a basic induction var: */
4421 || regs->array[bl->regno].n_times_set != bl->biv_count
4422 /* If never incremented, it is an invariant that we decided not to
4423 move, so leave it alone. */
4424 || ! bl->incremented)
4426 if (loop_dump_stream)
4427 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4428 bl->regno,
4429 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4430 ? "not induction variable"
4431 : (! bl->incremented ? "never incremented"
4432 : "count error")));
4434 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4435 *backbl = bl->next;
4437 else
4439 backbl = &bl->next;
4441 if (loop_dump_stream)
4442 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4448 /* Determine how BIVs are initialised by looking through the pre-header's
4449 extended basic block. */
4450 static void
4451 loop_bivs_init_find (loop)
4452 struct loop *loop;
4454 struct loop_ivs *ivs = LOOP_IVS (loop);
4455 /* Temporary list pointers for traversing ivs->list. */
4456 struct iv_class *bl;
4457 int call_seen;
4458 rtx p;
4460 /* Find initial value for each biv by searching backwards from loop_start,
4461 halting at first label. Also record any test condition. */
4463 call_seen = 0;
4464 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4466 rtx test;
4468 note_insn = p;
4470 if (GET_CODE (p) == CALL_INSN)
4471 call_seen = 1;
4473 if (INSN_P (p))
4474 note_stores (PATTERN (p), record_initial, ivs);
4476 /* Record any test of a biv that branches around the loop if no store
4477 between it and the start of loop. We only care about tests with
4478 constants and registers and only certain of those. */
4479 if (GET_CODE (p) == JUMP_INSN
4480 && JUMP_LABEL (p) != 0
4481 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4482 && (test = get_condition_for_loop (loop, p)) != 0
4483 && GET_CODE (XEXP (test, 0)) == REG
4484 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4485 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4486 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4487 && bl->init_insn == 0)
4489 /* If an NE test, we have an initial value! */
4490 if (GET_CODE (test) == NE)
4492 bl->init_insn = p;
4493 bl->init_set = gen_rtx_SET (VOIDmode,
4494 XEXP (test, 0), XEXP (test, 1));
4496 else
4497 bl->initial_test = test;
4503 /* Look at each biv and see if we can say anything better about its
4504 initial value from any initializing insns set up above. (This is done
4505 in two passes to avoid missing SETs in a PARALLEL.) */
4506 static void
4507 loop_bivs_check (loop)
4508 struct loop *loop;
4510 struct loop_ivs *ivs = LOOP_IVS (loop);
4511 /* Temporary list pointers for traversing ivs->list. */
4512 struct iv_class *bl;
4513 struct iv_class **backbl;
4515 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4517 rtx src;
4518 rtx note;
4520 if (! bl->init_insn)
4521 continue;
4523 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4524 is a constant, use the value of that. */
4525 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4526 && CONSTANT_P (XEXP (note, 0)))
4527 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4528 && CONSTANT_P (XEXP (note, 0))))
4529 src = XEXP (note, 0);
4530 else
4531 src = SET_SRC (bl->init_set);
4533 if (loop_dump_stream)
4534 fprintf (loop_dump_stream,
4535 "Biv %d: initialized at insn %d: initial value ",
4536 bl->regno, INSN_UID (bl->init_insn));
4538 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4539 || GET_MODE (src) == VOIDmode)
4540 && valid_initial_value_p (src, bl->init_insn,
4541 LOOP_INFO (loop)->pre_header_has_call,
4542 loop->start))
4544 bl->initial_value = src;
4546 if (loop_dump_stream)
4548 print_simple_rtl (loop_dump_stream, src);
4549 fputc ('\n', loop_dump_stream);
4552 /* If we can't make it a giv,
4553 let the biv keep its initial value of "itself". */
4554 else if (loop_dump_stream)
4555 fprintf (loop_dump_stream, "is complex\n");
4560 /* Search the loop for general induction variables. */
4562 static void
4563 loop_givs_find (loop)
4564 struct loop* loop;
4566 for_each_insn_in_loop (loop, check_insn_for_givs);
4570 /* For each giv for which we still don't know whether or not it is
4571 replaceable, check to see if it is replaceable because its final value
4572 can be calculated. */
4574 static void
4575 loop_givs_check (loop)
4576 struct loop *loop;
4578 struct loop_ivs *ivs = LOOP_IVS (loop);
4579 struct iv_class *bl;
4581 for (bl = ivs->list; bl; bl = bl->next)
4583 struct induction *v;
4585 for (v = bl->giv; v; v = v->next_iv)
4586 if (! v->replaceable && ! v->not_replaceable)
4587 check_final_value (loop, v);
4592 /* Return non-zero if it is possible to eliminate the biv BL provided
4593 all givs are reduced. This is possible if either the reg is not
4594 used outside the loop, or we can compute what its final value will
4595 be. */
4597 static int
4598 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4599 struct loop *loop;
4600 struct iv_class *bl;
4601 int threshold;
4602 int insn_count;
4604 /* For architectures with a decrement_and_branch_until_zero insn,
4605 don't do this if we put a REG_NONNEG note on the endtest for this
4606 biv. */
4608 #ifdef HAVE_decrement_and_branch_until_zero
4609 if (bl->nonneg)
4611 if (loop_dump_stream)
4612 fprintf (loop_dump_stream,
4613 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4614 return 0;
4616 #endif
4618 /* Check that biv is used outside loop or if it has a final value.
4619 Compare against bl->init_insn rather than loop->start. We aren't
4620 concerned with any uses of the biv between init_insn and
4621 loop->start since these won't be affected by the value of the biv
4622 elsewhere in the function, so long as init_insn doesn't use the
4623 biv itself. */
4625 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4626 && bl->init_insn
4627 && INSN_UID (bl->init_insn) < max_uid_for_loop
4628 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4629 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4630 || (bl->final_value = final_biv_value (loop, bl)))
4631 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4633 if (loop_dump_stream)
4635 fprintf (loop_dump_stream,
4636 "Cannot eliminate biv %d.\n",
4637 bl->regno);
4638 fprintf (loop_dump_stream,
4639 "First use: insn %d, last use: insn %d.\n",
4640 REGNO_FIRST_UID (bl->regno),
4641 REGNO_LAST_UID (bl->regno));
4643 return 0;
4647 /* Reduce each giv of BL that we have decided to reduce. */
4649 static void
4650 loop_givs_reduce (loop, bl)
4651 struct loop *loop;
4652 struct iv_class *bl;
4654 struct induction *v;
4656 for (v = bl->giv; v; v = v->next_iv)
4658 struct induction *tv;
4659 if (! v->ignore && v->same == 0)
4661 int auto_inc_opt = 0;
4663 /* If the code for derived givs immediately below has already
4664 allocated a new_reg, we must keep it. */
4665 if (! v->new_reg)
4666 v->new_reg = gen_reg_rtx (v->mode);
4668 #ifdef AUTO_INC_DEC
4669 /* If the target has auto-increment addressing modes, and
4670 this is an address giv, then try to put the increment
4671 immediately after its use, so that flow can create an
4672 auto-increment addressing mode. */
4673 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4674 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4675 /* We don't handle reversed biv's because bl->biv->insn
4676 does not have a valid INSN_LUID. */
4677 && ! bl->reversed
4678 && v->always_executed && ! v->maybe_multiple
4679 && INSN_UID (v->insn) < max_uid_for_loop)
4681 /* If other giv's have been combined with this one, then
4682 this will work only if all uses of the other giv's occur
4683 before this giv's insn. This is difficult to check.
4685 We simplify this by looking for the common case where
4686 there is one DEST_REG giv, and this giv's insn is the
4687 last use of the dest_reg of that DEST_REG giv. If the
4688 increment occurs after the address giv, then we can
4689 perform the optimization. (Otherwise, the increment
4690 would have to go before other_giv, and we would not be
4691 able to combine it with the address giv to get an
4692 auto-inc address.) */
4693 if (v->combined_with)
4695 struct induction *other_giv = 0;
4697 for (tv = bl->giv; tv; tv = tv->next_iv)
4698 if (tv->same == v)
4700 if (other_giv)
4701 break;
4702 else
4703 other_giv = tv;
4705 if (! tv && other_giv
4706 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4707 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4708 == INSN_UID (v->insn))
4709 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4710 auto_inc_opt = 1;
4712 /* Check for case where increment is before the address
4713 giv. Do this test in "loop order". */
4714 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4715 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4716 || (INSN_LUID (bl->biv->insn)
4717 > INSN_LUID (loop->scan_start))))
4718 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4719 && (INSN_LUID (loop->scan_start)
4720 < INSN_LUID (bl->biv->insn))))
4721 auto_inc_opt = -1;
4722 else
4723 auto_inc_opt = 1;
4725 #ifdef HAVE_cc0
4727 rtx prev;
4729 /* We can't put an insn immediately after one setting
4730 cc0, or immediately before one using cc0. */
4731 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4732 || (auto_inc_opt == -1
4733 && (prev = prev_nonnote_insn (v->insn)) != 0
4734 && INSN_P (prev)
4735 && sets_cc0_p (PATTERN (prev))))
4736 auto_inc_opt = 0;
4738 #endif
4740 if (auto_inc_opt)
4741 v->auto_inc_opt = 1;
4743 #endif
4745 /* For each place where the biv is incremented, add an insn
4746 to increment the new, reduced reg for the giv. */
4747 for (tv = bl->biv; tv; tv = tv->next_iv)
4749 rtx insert_before;
4751 if (! auto_inc_opt)
4752 insert_before = tv->insn;
4753 else if (auto_inc_opt == 1)
4754 insert_before = NEXT_INSN (v->insn);
4755 else
4756 insert_before = v->insn;
4758 if (tv->mult_val == const1_rtx)
4759 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4760 v->new_reg, v->new_reg,
4761 0, insert_before);
4762 else /* tv->mult_val == const0_rtx */
4763 /* A multiply is acceptable here
4764 since this is presumed to be seldom executed. */
4765 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4766 v->add_val, v->new_reg,
4767 0, insert_before);
4770 /* Add code at loop start to initialize giv's reduced reg. */
4772 loop_iv_add_mult_hoist (loop,
4773 extend_value_for_giv (v, bl->initial_value),
4774 v->mult_val, v->add_val, v->new_reg);
4780 /* Check for givs whose first use is their definition and whose
4781 last use is the definition of another giv. If so, it is likely
4782 dead and should not be used to derive another giv nor to
4783 eliminate a biv. */
4785 static void
4786 loop_givs_dead_check (loop, bl)
4787 struct loop *loop ATTRIBUTE_UNUSED;
4788 struct iv_class *bl;
4790 struct induction *v;
4792 for (v = bl->giv; v; v = v->next_iv)
4794 if (v->ignore
4795 || (v->same && v->same->ignore))
4796 continue;
4798 if (v->giv_type == DEST_REG
4799 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4801 struct induction *v1;
4803 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4804 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4805 v->maybe_dead = 1;
4811 static void
4812 loop_givs_rescan (loop, bl, reg_map)
4813 struct loop *loop;
4814 struct iv_class *bl;
4815 rtx *reg_map;
4817 struct induction *v;
4819 for (v = bl->giv; v; v = v->next_iv)
4821 if (v->same && v->same->ignore)
4822 v->ignore = 1;
4824 if (v->ignore)
4825 continue;
4827 /* Update expression if this was combined, in case other giv was
4828 replaced. */
4829 if (v->same)
4830 v->new_reg = replace_rtx (v->new_reg,
4831 v->same->dest_reg, v->same->new_reg);
4833 /* See if this register is known to be a pointer to something. If
4834 so, see if we can find the alignment. First see if there is a
4835 destination register that is a pointer. If so, this shares the
4836 alignment too. Next see if we can deduce anything from the
4837 computational information. If not, and this is a DEST_ADDR
4838 giv, at least we know that it's a pointer, though we don't know
4839 the alignment. */
4840 if (GET_CODE (v->new_reg) == REG
4841 && v->giv_type == DEST_REG
4842 && REG_POINTER (v->dest_reg))
4843 mark_reg_pointer (v->new_reg,
4844 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4845 else if (GET_CODE (v->new_reg) == REG
4846 && REG_POINTER (v->src_reg))
4848 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4850 if (align == 0
4851 || GET_CODE (v->add_val) != CONST_INT
4852 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4853 align = 0;
4855 mark_reg_pointer (v->new_reg, align);
4857 else if (GET_CODE (v->new_reg) == REG
4858 && GET_CODE (v->add_val) == REG
4859 && REG_POINTER (v->add_val))
4861 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4863 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4864 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4865 align = 0;
4867 mark_reg_pointer (v->new_reg, align);
4869 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4870 mark_reg_pointer (v->new_reg, 0);
4872 if (v->giv_type == DEST_ADDR)
4873 /* Store reduced reg as the address in the memref where we found
4874 this giv. */
4875 validate_change (v->insn, v->location, v->new_reg, 0);
4876 else if (v->replaceable)
4878 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4880 else
4882 /* Not replaceable; emit an insn to set the original giv reg from
4883 the reduced giv, same as above. */
4884 loop_insn_emit_after (loop, 0, v->insn,
4885 gen_move_insn (v->dest_reg, v->new_reg));
4888 /* When a loop is reversed, givs which depend on the reversed
4889 biv, and which are live outside the loop, must be set to their
4890 correct final value. This insn is only needed if the giv is
4891 not replaceable. The correct final value is the same as the
4892 value that the giv starts the reversed loop with. */
4893 if (bl->reversed && ! v->replaceable)
4894 loop_iv_add_mult_sink (loop,
4895 extend_value_for_giv (v, bl->initial_value),
4896 v->mult_val, v->add_val, v->dest_reg);
4897 else if (v->final_value)
4898 loop_insn_sink_or_swim (loop,
4899 gen_load_of_final_value (v->dest_reg,
4900 v->final_value));
4902 if (loop_dump_stream)
4904 fprintf (loop_dump_stream, "giv at %d reduced to ",
4905 INSN_UID (v->insn));
4906 print_simple_rtl (loop_dump_stream, v->new_reg);
4907 fprintf (loop_dump_stream, "\n");
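The pointer-alignment deduction above follows one rule: a known alignment survives adding a constant only if the constant is a multiple of the alignment in bytes. A standalone sketch of that check (all values assumed):

#include <stdio.h>

int
main (void)
{
  unsigned align = 32;		/* known alignment of the source, in bits */
  unsigned bits_per_unit = 8;	/* BITS_PER_UNIT on byte-addressed targets */
  long add_val = 10;		/* constant added by the giv */

  /* 10 is not a multiple of 4 bytes, so the alignment is no longer known.  */
  if (align == 0 || add_val % (align / bits_per_unit) != 0)
    align = 0;

  printf ("deduced alignment: %u bits\n", align);	/* 0 */
  return 0;
}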
4913 static int
4914 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4915 struct loop *loop ATTRIBUTE_UNUSED;
4916 struct iv_class *bl;
4917 struct induction *v;
4918 rtx test_reg;
4920 int add_cost;
4921 int benefit;
4923 benefit = v->benefit;
4924 PUT_MODE (test_reg, v->mode);
4925 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4926 test_reg, test_reg);
4928 /* Reduce benefit if not replaceable, since we will insert a
4929 move-insn to replace the insn that calculates this giv. Don't do
4930 this unless the giv is a user variable, since it will often be
4931 marked non-replaceable because of the duplication of the exit
4932 code outside the loop. In such a case, the copies we insert are
4933 dead and will be deleted. So they don't have a cost. Similar
4934 situations exist. */
4935 /* ??? The new final_[bg]iv_value code does a much better job of
4936 finding replaceable giv's, and hence this code may no longer be
4937 necessary. */
4938 if (! v->replaceable && ! bl->eliminable
4939 && REG_USERVAR_P (v->dest_reg))
4940 benefit -= copy_cost;
4942 /* Decrease the benefit to count the add-insns that we will insert
4943 to increment the reduced reg for the giv. ??? This can
4944 overestimate the run-time cost of the additional insns, e.g. if
4945 there are multiple basic blocks that increment the biv, but only
4946 one of these blocks is executed during each iteration. There is
4947 no good way to detect cases like this with the current structure
4948 of the loop optimizer. This code is more accurate for
4949 determining code size than run-time benefits. */
4950 benefit -= add_cost * bl->biv_count;
4952 /* Decide whether to strength-reduce this giv or to leave the code
4953 unchanged (recompute it from the biv each time it is used). This
4954 decision can be made independently for each giv. */
4956 #ifdef AUTO_INC_DEC
4957 /* Attempt to guess whether autoincrement will handle some of the
4958 new add insns; if so, increase BENEFIT (undo the subtraction of
4959 add_cost that was done above). */
4960 if (v->giv_type == DEST_ADDR
4961 /* Increasing the benefit is risky, since this is only a guess.
4962 Avoid increasing register pressure in cases where there would
4963 be no other benefit from reducing this giv. */
4964 && benefit > 0
4965 && GET_CODE (v->mult_val) == CONST_INT)
4967 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4969 if (HAVE_POST_INCREMENT
4970 && INTVAL (v->mult_val) == size)
4971 benefit += add_cost * bl->biv_count;
4972 else if (HAVE_PRE_INCREMENT
4973 && INTVAL (v->mult_val) == size)
4974 benefit += add_cost * bl->biv_count;
4975 else if (HAVE_POST_DECREMENT
4976 && -INTVAL (v->mult_val) == size)
4977 benefit += add_cost * bl->biv_count;
4978 else if (HAVE_PRE_DECREMENT
4979 && -INTVAL (v->mult_val) == size)
4980 benefit += add_cost * bl->biv_count;
4982 #endif
4984 return benefit;
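The adjustments above are plain cost arithmetic. A self-contained sketch with invented costs (none of these numbers come from a real target; the auto-inc guess is modeled as a single flag):

#include <stdio.h>

int
main (void)
{
  int benefit = 10;	  /* initial benefit of reducing the giv */
  int copy_cost = 4;	  /* cost of the move for a non-replaceable giv */
  int add_cost = 1;	  /* cost of one increment of the reduced reg */
  int biv_count = 2;	  /* number of places the biv is incremented */
  int replaceable = 0, eliminable = 0, user_var = 1;
  int auto_inc_matches = 1;  /* assume an auto-inc mode covers the adds */

  if (!replaceable && !eliminable && user_var)
    benefit -= copy_cost;

  /* One add insn per biv increment to keep the reduced reg current.  */
  benefit -= add_cost * biv_count;

  /* Guess that auto-increment absorbs those adds again.  */
  if (benefit > 0 && auto_inc_matches)
    benefit += add_cost * biv_count;

  printf ("net benefit = %d\n", benefit);	/* 6 with these numbers */
  return 0;
}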
4988 /* Free IV structures for LOOP. */
4990 static void
4991 loop_ivs_free (loop)
4992 struct loop *loop;
4994 struct loop_ivs *ivs = LOOP_IVS (loop);
4995 struct iv_class *iv = ivs->list;
4997 free (ivs->regs);
4999 while (iv)
5001 struct iv_class *next = iv->next;
5002 struct induction *induction;
5003 struct induction *next_induction;
5005 for (induction = iv->biv; induction; induction = next_induction)
5007 next_induction = induction->next_iv;
5008 free (induction);
5010 for (induction = iv->giv; induction; induction = next_induction)
5012 next_induction = induction->next_iv;
5013 free (induction);
5016 free (iv);
5017 iv = next;
5022 /* Perform strength reduction and induction variable elimination.
5024 Pseudo registers created during this function will be beyond the
5025 last valid index in several tables including
5026 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5027 problem here, because the added registers cannot be givs outside of
5028 their loop, and hence will never be reconsidered. But scan_loop
5029 must check regnos to make sure they are in bounds. */
5031 static void
5032 strength_reduce (loop, flags)
5033 struct loop *loop;
5034 int flags;
5036 struct loop_info *loop_info = LOOP_INFO (loop);
5037 struct loop_regs *regs = LOOP_REGS (loop);
5038 struct loop_ivs *ivs = LOOP_IVS (loop);
5039 rtx p;
5040 /* Temporary list pointer for traversing ivs->list. */
5041 struct iv_class *bl;
5042 /* Ratio of extra register life span we can justify
5043 for saving an instruction. More if loop doesn't call subroutines
5044 since in that case saving an insn makes more difference
5045 and more registers are available. */
5046 /* ??? Could set this to the last value of threshold in move_movables. */
5047 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5048 /* Map of pseudo-register replacements. */
5049 rtx *reg_map = NULL;
5050 int reg_map_size;
5051 int unrolled_insn_copies = 0;
5052 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5053 int insn_count = count_insns_in_loop (loop);
5055 addr_placeholder = gen_reg_rtx (Pmode);
5057 ivs->n_regs = max_reg_before_loop;
5058 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5060 /* Find all BIVs in loop. */
5061 loop_bivs_find (loop);
5063 /* Exit if there are no bivs. */
5064 if (! ivs->list)
5066 /* We can still unroll the loop anyway, but indicate that there is no
5067 strength reduction info available. */
5068 if (flags & LOOP_UNROLL)
5069 unroll_loop (loop, insn_count, 0);
5071 loop_ivs_free (loop);
5072 return;
5075 /* Determine how BIVs are initialised by looking through the pre-header's
5076 extended basic block. */
5077 loop_bivs_init_find (loop);
5079 /* Look at each biv and see if we can say anything better about its
5080 initial value from any initializing insns set up above. */
5081 loop_bivs_check (loop);
5083 /* Search the loop for general induction variables. */
5084 loop_givs_find (loop);
5086 /* Try to calculate and save the number of loop iterations. This is
5087 set to zero if the actual number cannot be calculated. This must
5088 be called after all giv's have been identified, since otherwise it may
5089 fail if the iteration variable is a giv. */
5090 loop_iterations (loop);
5092 #ifdef HAVE_prefetch
5093 if (flags & LOOP_PREFETCH)
5094 emit_prefetch_instructions (loop);
5095 #endif
5097 /* Now for each giv for which we still don't know whether or not it is
5098 replaceable, check to see if it is replaceable because its final value
5099 can be calculated. This must be done after loop_iterations is called,
5100 so that final_giv_value will work correctly. */
5101 loop_givs_check (loop);
5103 /* Try to prove that the loop counter variable (if any) is always
5104 nonnegative; if so, record that fact with a REG_NONNEG note
5105 so that "decrement and branch until zero" insn can be used. */
5106 check_dbra_loop (loop, insn_count);
5108 /* Create reg_map to hold substitutions for replaceable giv regs.
5109 Some givs might have been made from biv increments, so look at
5110 ivs->reg_iv_type for a suitable size. */
5111 reg_map_size = ivs->n_regs;
5112 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5114 /* Examine each iv class for feasibility of strength reduction/induction
5115 variable elimination. */
5117 for (bl = ivs->list; bl; bl = bl->next)
5119 struct induction *v;
5120 int benefit;
5122 /* Test whether it will be possible to eliminate this biv
5123 provided all givs are reduced. */
5124 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5126 /* This will be true at the end, if all givs which depend on this
5127 biv have been strength reduced.
5128 We can't (currently) eliminate the biv unless this is so. */
5129 bl->all_reduced = 1;
5131 /* Check each extension dependent giv in this class to see if its
5132 root biv is safe from wrapping in the interior mode. */
5133 check_ext_dependent_givs (bl, loop_info);
5135 /* Combine all giv's for this iv_class. */
5136 combine_givs (regs, bl);
5138 for (v = bl->giv; v; v = v->next_iv)
5140 struct induction *tv;
5142 if (v->ignore || v->same)
5143 continue;
5145 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5147 /* If an insn is not to be strength reduced, then set its ignore
5148 flag, and clear bl->all_reduced. */
5150 /* A giv that depends on a reversed biv must be reduced if it is
5151 used after the loop exit, otherwise, it would have the wrong
5152 value after the loop exit. To make it simple, just reduce all
5153 such giv's whether or not we know they are used after the loop
5154 exit. */
5156 if (! flag_reduce_all_givs
5157 && v->lifetime * threshold * benefit < insn_count
5158 && ! bl->reversed)
5160 if (loop_dump_stream)
5161 fprintf (loop_dump_stream,
5162 "giv of insn %d not worth while, %d vs %d.\n",
5163 INSN_UID (v->insn),
5164 v->lifetime * threshold * benefit, insn_count);
5165 v->ignore = 1;
5166 bl->all_reduced = 0;
5168 else
5170 /* Check that we can increment the reduced giv without a
5171 multiply insn. If not, reject it. */
5173 for (tv = bl->biv; tv; tv = tv->next_iv)
5174 if (tv->mult_val == const1_rtx
5175 && ! product_cheap_p (tv->add_val, v->mult_val))
5177 if (loop_dump_stream)
5178 fprintf (loop_dump_stream,
5179 "giv of insn %d: would need a multiply.\n",
5180 INSN_UID (v->insn));
5181 v->ignore = 1;
5182 bl->all_reduced = 0;
5183 break;
5188 /* Check for givs whose first use is their definition and whose
5189 last use is the definition of another giv. If so, it is likely
5190 dead and should not be used to derive another giv nor to
5191 eliminate a biv. */
5192 loop_givs_dead_check (loop, bl);
5194 /* Reduce each giv that we decided to reduce. */
5195 loop_givs_reduce (loop, bl);
5197 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5198 as not reduced.
5200 For each giv register that can be reduced now: if replaceable,
5201 substitute reduced reg wherever the old giv occurs;
5202 else add new move insn "giv_reg = reduced_reg". */
5203 loop_givs_rescan (loop, bl, reg_map);
5205 /* All the givs based on the biv bl have been reduced if they
5206 merit it. */
5208 /* For each giv not marked as maybe dead that has been combined with a
5209 second giv, clear any "maybe dead" mark on that second giv.
5210 v->new_reg will either be or refer to the register of the giv it
5211 combined with.
5213 Doing this clearing avoids problems in biv elimination where
5214 a giv's new_reg is a complex value that can't be put in the
5215 insn but the giv combined with (with a reg as new_reg) is
5216 marked maybe_dead. Since the register will be used in either
5217 case, we'd prefer it be used from the simpler giv. */
5219 for (v = bl->giv; v; v = v->next_iv)
5220 if (! v->maybe_dead && v->same)
5221 v->same->maybe_dead = 0;
5223 /* Try to eliminate the biv, if it is a candidate.
5224 This won't work if ! bl->all_reduced,
5225 since the givs we planned to use might not have been reduced.
5227 We have to be careful that we didn't initially think we could
5228 eliminate this biv because of a giv that we now think may be
5229 dead and shouldn't be used as a biv replacement.
5231 Also, there is the possibility that we may have a giv that looks
5232 like it can be used to eliminate a biv, but the resulting insn
5233 isn't valid. This can happen, for example, on the 88k, where a
5234 JUMP_INSN can compare a register only with zero. Attempts to
5235 replace it with a compare with a constant will fail.
5237 Note that in cases where this call fails, we may have replaced some
5238 of the occurrences of the biv with a giv, but no harm was done in
5239 doing so in the rare cases where it can occur. */
5241 if (bl->all_reduced == 1 && bl->eliminable
5242 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5244 /* ?? If we created a new test to bypass the loop entirely,
5245 or otherwise drop straight in, based on this test, then
5246 we might want to rewrite it also. This way some later
5247 pass has more hope of removing the initialization of this
5248 biv entirely. */
5250 /* If final_value != 0, then the biv may be used after loop end
5251 and we must emit an insn to set it just in case.
5253 Reversed bivs already have an insn after the loop setting their
5254 value, so we don't need another one. We can't calculate the
5255 proper final value for such a biv here anyway. */
5256 if (bl->final_value && ! bl->reversed)
5257 loop_insn_sink_or_swim (loop,
5258 gen_load_of_final_value (bl->biv->dest_reg,
5259 bl->final_value));
5261 if (loop_dump_stream)
5262 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5263 bl->regno);
5265 /* See above note wrt final_value. But since we couldn't eliminate
5266 the biv, we must set the value after the loop instead of before. */
5267 else if (bl->final_value && ! bl->reversed)
5268 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5269 bl->final_value));
5272 /* Go through all the instructions in the loop, making all the
5273 register substitutions scheduled in REG_MAP. */
5275 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5276 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5277 || GET_CODE (p) == CALL_INSN)
5279 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5280 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5281 INSN_CODE (p) = -1;
5284 if (loop_info->n_iterations > 0)
5286 /* When we completely unroll a loop we will likely not need the increment
5287 of the loop BIV and we will not need the conditional branch at the
5288 end of the loop. */
5289 unrolled_insn_copies = insn_count - 2;
5291 #ifdef HAVE_cc0
5292 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5293 need the comparison before the conditional branch at the end of the
5294 loop. */
5295 unrolled_insn_copies -= 1;
5296 #endif
5298 /* We'll need one copy for each loop iteration. */
5299 unrolled_insn_copies *= loop_info->n_iterations;
5301 /* A little slop to account for the ability to remove initialization
5302 code, better CSE, and other secondary benefits of completely
5303 unrolling some loops. */
5304 unrolled_insn_copies -= 1;
5306 /* Clamp the value. */
5307 if (unrolled_insn_copies < 0)
5308 unrolled_insn_copies = 0;
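The size estimate above is easiest to see as arithmetic. A sketch with an assumed loop body (HAVE_cc0 taken as not defined here):

#include <stdio.h>

int
main (void)
{
  int insn_count = 12;		     /* assumed rolled loop body size */
  unsigned long n_iterations = 4;
  long copies;

  /* Unrolling drops the biv increment and the conditional branch.  */
  copies = insn_count - 2;
  /* One copy of the remaining body per iteration.  */
  copies *= n_iterations;
  /* Slop for removable init code, better CSE, and similar wins.  */
  copies -= 1;
  if (copies < 0)
    copies = 0;

  printf ("estimated unrolled size %ld vs rolled %d\n",
	  copies, insn_count);	/* 39 vs 12 */
  return 0;
}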
5311 /* Unroll loops from within strength reduction so that we can use the
5312 induction variable information that strength_reduce has already
5313 collected. Always unroll loops that would be as small or smaller
5314 unrolled than when rolled. */
5315 if ((flags & LOOP_UNROLL)
5316 || (!(flags & LOOP_FIRST_PASS)
5317 && loop_info->n_iterations > 0
5318 && unrolled_insn_copies <= insn_count))
5319 unroll_loop (loop, insn_count, 1);
5321 #ifdef HAVE_doloop_end
5322 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5323 doloop_optimize (loop);
5324 #endif /* HAVE_doloop_end */
5326 /* If the number of iterations is known, attach a branch prediction note
5327 to the branch. Do that only in the second loop pass, as loop unrolling
5328 may change the number of iterations performed. */
5329 if (flags & LOOP_BCT)
5331 unsigned HOST_WIDE_INT n
5332 = loop_info->n_iterations / loop_info->unroll_number;
5333 if (n > 1)
5334 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5335 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
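The probability attached to the note is simply "taken (n-1)/n of the time", scaled to the probability base. A sketch (the base value used here is an assumption):

#include <stdio.h>

int
main (void)
{
  int reg_br_prob_base = 10000;	 /* assumed REG_BR_PROB_BASE scale */
  unsigned long n = 8;		 /* known iterations per loop entry */

  /* The loop-back branch is taken n-1 times out of n.  */
  int prob = reg_br_prob_base - reg_br_prob_base / (int) n;

  printf ("predicted taken: %d of %d\n",
	  prob, reg_br_prob_base);	/* 8750 of 10000 */
  return 0;
}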
5338 if (loop_dump_stream)
5339 fprintf (loop_dump_stream, "\n");
5341 loop_ivs_free (loop);
5342 if (reg_map)
5343 free (reg_map);
5346 /* Record all basic induction variables calculated in the insn. */
5347 static rtx
5348 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5349 struct loop *loop;
5350 rtx p;
5351 int not_every_iteration;
5352 int maybe_multiple;
5354 struct loop_ivs *ivs = LOOP_IVS (loop);
5355 rtx set;
5356 rtx dest_reg;
5357 rtx inc_val;
5358 rtx mult_val;
5359 rtx *location;
5361 if (GET_CODE (p) == INSN
5362 && (set = single_set (p))
5363 && GET_CODE (SET_DEST (set)) == REG)
5365 dest_reg = SET_DEST (set);
5366 if (REGNO (dest_reg) < max_reg_before_loop
5367 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5368 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5370 if (basic_induction_var (loop, SET_SRC (set),
5371 GET_MODE (SET_SRC (set)),
5372 dest_reg, p, &inc_val, &mult_val,
5373 &location))
5375 /* It is a possible basic induction variable.
5376 Create and initialize an induction structure for it. */
5378 struct induction *v
5379 = (struct induction *) xmalloc (sizeof (struct induction));
5381 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5382 not_every_iteration, maybe_multiple);
5383 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5385 else if (REGNO (dest_reg) < ivs->n_regs)
5386 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5389 return p;
5392 /* Record all givs calculated in the insn.
5393 A register is a giv if: it is only set once, it is a function of a
5394 biv and a constant (or invariant), and it is not a biv. */
5395 static rtx
5396 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5397 struct loop *loop;
5398 rtx p;
5399 int not_every_iteration;
5400 int maybe_multiple;
5402 struct loop_regs *regs = LOOP_REGS (loop);
5404 rtx set;
5405 /* Look for a general induction variable in a register. */
5406 if (GET_CODE (p) == INSN
5407 && (set = single_set (p))
5408 && GET_CODE (SET_DEST (set)) == REG
5409 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5411 rtx src_reg;
5412 rtx dest_reg;
5413 rtx add_val;
5414 rtx mult_val;
5415 rtx ext_val;
5416 int benefit;
5417 rtx regnote = 0;
5418 rtx last_consec_insn;
5420 dest_reg = SET_DEST (set);
5421 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5422 return p;
5424 if (/* SET_SRC is a giv. */
5425 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5426 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5427 /* Equivalent expression is a giv. */
5428 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5429 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5430 &add_val, &mult_val, &ext_val, 0,
5431 &benefit, VOIDmode)))
5432 /* Don't try to handle any regs made by loop optimization.
5433 We have nothing on them in regno_first_uid, etc. */
5434 && REGNO (dest_reg) < max_reg_before_loop
5435 /* Don't recognize a BASIC_INDUCT_VAR here. */
5436 && dest_reg != src_reg
5437 /* This must be the only place where the register is set. */
5438 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5439 /* or all sets must be consecutive and make a giv. */
5440 || (benefit = consec_sets_giv (loop, benefit, p,
5441 src_reg, dest_reg,
5442 &add_val, &mult_val, &ext_val,
5443 &last_consec_insn))))
5445 struct induction *v
5446 = (struct induction *) xmalloc (sizeof (struct induction));
5448 /* If this is a library call, increase benefit. */
5449 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5450 benefit += libcall_benefit (p);
5452 /* Skip the consecutive insns, if there are any. */
5453 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5454 p = last_consec_insn;
5456 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5457 ext_val, benefit, DEST_REG, not_every_iteration,
5458 maybe_multiple, (rtx*) 0);
5463 #ifndef DONT_REDUCE_ADDR
5464 /* Look for givs which are memory addresses. */
5465 /* This resulted in worse code on a VAX 8600. I wonder if it
5466 still does. */
5467 if (GET_CODE (p) == INSN)
5468 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5469 maybe_multiple);
5470 #endif
5472 /* Update the status of whether giv can derive other givs. This can
5473 change when we pass a label or an insn that updates a biv. */
5474 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5475 || GET_CODE (p) == CODE_LABEL)
5476 update_giv_derive (loop, p);
5477 return p;
5480 /* Return 1 if X is a valid source for an initial value (or as value being
5481 compared against in an initial test).
5483 X must be either a register or constant and must not be clobbered between
5484 the current insn and the start of the loop.
5486 INSN is the insn containing X. */
5488 static int
5489 valid_initial_value_p (x, insn, call_seen, loop_start)
5490 rtx x;
5491 rtx insn;
5492 int call_seen;
5493 rtx loop_start;
5495 if (CONSTANT_P (x))
5496 return 1;
5498 /* Only consider pseudos we know about, initialized in insns whose luids
5499 we know. */
5500 if (GET_CODE (x) != REG
5501 || REGNO (x) >= max_reg_before_loop)
5502 return 0;
5504 /* Don't use a call-clobbered register across a call which clobbers it. On
5505 some machines, don't use any hard registers at all. */
5506 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5507 && (SMALL_REGISTER_CLASSES
5508 || (call_used_regs[REGNO (x)] && call_seen)))
5509 return 0;
5511 /* Don't use registers that have been clobbered before the start of the
5512 loop. */
5513 if (reg_set_between_p (x, insn, loop_start))
5514 return 0;
5516 return 1;
5519 /* Scan X for memory refs and check each memory address
5520 as a possible giv. INSN is the insn whose pattern X comes from.
5521 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5522 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5523 more than once in each loop iteration. */
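/* For example (illustrative only), an address such as

   (mem:SI (plus:SI (reg 100) (const_int 16)))

   where reg 100 is a biv yields a DEST_ADDR giv for the address, with
   mult_val == const1_rtx and add_val == (const_int 16).  */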
5525 static void
5526 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5527 const struct loop *loop;
5528 rtx x;
5529 rtx insn;
5530 int not_every_iteration, maybe_multiple;
5532 int i, j;
5533 enum rtx_code code;
5534 const char *fmt;
5536 if (x == 0)
5537 return;
5539 code = GET_CODE (x);
5540 switch (code)
5542 case REG:
5543 case CONST_INT:
5544 case CONST:
5545 case CONST_DOUBLE:
5546 case SYMBOL_REF:
5547 case LABEL_REF:
5548 case PC:
5549 case CC0:
5550 case ADDR_VEC:
5551 case ADDR_DIFF_VEC:
5552 case USE:
5553 case CLOBBER:
5554 return;
5556 case MEM:
5558 rtx src_reg;
5559 rtx add_val;
5560 rtx mult_val;
5561 rtx ext_val;
5562 int benefit;
5564 /* This code used to disable creating GIVs with mult_val == 1 and
5565 add_val == 0. However, this leads to lost optimizations when
5566 it comes time to combine a set of related DEST_ADDR GIVs, since
5567 this one would not be seen. */
5569 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5570 &mult_val, &ext_val, 1, &benefit,
5571 GET_MODE (x)))
5573 /* Found one; record it. */
5574 struct induction *v
5575 = (struct induction *) xmalloc (sizeof (struct induction));
5577 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5578 add_val, ext_val, benefit, DEST_ADDR,
5579 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5581 v->mem = x;
5584 return;
5586 default:
5587 break;
5590 /* Recursively scan the subexpressions for other mem refs. */
5592 fmt = GET_RTX_FORMAT (code);
5593 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5594 if (fmt[i] == 'e')
5595 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5596 maybe_multiple);
5597 else if (fmt[i] == 'E')
5598 for (j = 0; j < XVECLEN (x, i); j++)
5599 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5600 maybe_multiple);
5603 /* Fill in the data about one biv update.
5604 V is the `struct induction' in which we record the biv. (It is
5605 allocated by the caller, with alloca.)
5606 INSN is the insn that sets it.
5607 DEST_REG is the biv's reg.
5609 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5610 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5611 being set to INC_VAL.
5613 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5614 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5615 can be executed more than once per iteration. If MAYBE_MULTIPLE
5616 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5617 executed exactly once per iteration. */
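/* To make the MULT_VAL convention concrete (an illustrative sketch): for
   the update `i = i + 4' we record mult_val == const1_rtx and inc_val ==
   (const_int 4); for `i = 42', a constant assignment accepted only in the
   innermost loop, we record mult_val == const0_rtx and inc_val ==
   (const_int 42).  */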
5619 static void
5620 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5621 not_every_iteration, maybe_multiple)
5622 struct loop *loop;
5623 struct induction *v;
5624 rtx insn;
5625 rtx dest_reg;
5626 rtx inc_val;
5627 rtx mult_val;
5628 rtx *location;
5629 int not_every_iteration;
5630 int maybe_multiple;
5632 struct loop_ivs *ivs = LOOP_IVS (loop);
5633 struct iv_class *bl;
5635 v->insn = insn;
5636 v->src_reg = dest_reg;
5637 v->dest_reg = dest_reg;
5638 v->mult_val = mult_val;
5639 v->add_val = inc_val;
5640 v->ext_dependent = NULL_RTX;
5641 v->location = location;
5642 v->mode = GET_MODE (dest_reg);
5643 v->always_computable = ! not_every_iteration;
5644 v->always_executed = ! not_every_iteration;
5645 v->maybe_multiple = maybe_multiple;
5647 /* Add this to the reg's iv_class, creating a class
5648 if this is the first incrementation of the reg. */
5650 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5651 if (bl == 0)
5653 /* Create and initialize new iv_class. */
5655 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5657 bl->regno = REGNO (dest_reg);
5658 bl->biv = 0;
5659 bl->giv = 0;
5660 bl->biv_count = 0;
5661 bl->giv_count = 0;
5663 /* Set initial value to the reg itself. */
5664 bl->initial_value = dest_reg;
5665 bl->final_value = 0;
5666 /* We haven't seen the initializing insn yet */
5667 bl->init_insn = 0;
5668 bl->init_set = 0;
5669 bl->initial_test = 0;
5670 bl->incremented = 0;
5671 bl->eliminable = 0;
5672 bl->nonneg = 0;
5673 bl->reversed = 0;
5674 bl->total_benefit = 0;
5676 /* Add this class to ivs->list. */
5677 bl->next = ivs->list;
5678 ivs->list = bl;
5680 /* Put it in the array of biv register classes. */
5681 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5684 /* Update IV_CLASS entry for this biv. */
5685 v->next_iv = bl->biv;
5686 bl->biv = v;
5687 bl->biv_count++;
5688 if (mult_val == const1_rtx)
5689 bl->incremented = 1;
5691 if (loop_dump_stream)
5692 loop_biv_dump (v, loop_dump_stream, 0);
5695 /* Fill in the data about one giv.
5696 V is the `struct induction' in which we record the giv. (It is
5697 allocated by the caller, with alloca.)
5698 INSN is the insn that sets it.
5699 BENEFIT estimates the savings from deleting this insn.
5700 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5701 into a register or is used as a memory address.
5703 SRC_REG is the biv reg which the giv is computed from.
5704 DEST_REG is the giv's reg (if the giv is stored in a reg).
5705 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5706 LOCATION points to the place where this giv's value appears in INSN. */
5708 static void
5709 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5710 benefit, type, not_every_iteration, maybe_multiple, location)
5711 const struct loop *loop;
5712 struct induction *v;
5713 rtx insn;
5714 rtx src_reg;
5715 rtx dest_reg;
5716 rtx mult_val, add_val, ext_val;
5717 int benefit;
5718 enum g_types type;
5719 int not_every_iteration, maybe_multiple;
5720 rtx *location;
5722 struct loop_ivs *ivs = LOOP_IVS (loop);
5723 struct induction *b;
5724 struct iv_class *bl;
5725 rtx set = single_set (insn);
5726 rtx temp;
5728 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5729 undo the MULT canonicalization that we performed earlier. */
5730 temp = simplify_rtx (add_val);
5731 if (temp
5732 && ! (GET_CODE (add_val) == MULT
5733 && GET_CODE (temp) == ASHIFT))
5734 add_val = temp;
5736 v->insn = insn;
5737 v->src_reg = src_reg;
5738 v->giv_type = type;
5739 v->dest_reg = dest_reg;
5740 v->mult_val = mult_val;
5741 v->add_val = add_val;
5742 v->ext_dependent = ext_val;
5743 v->benefit = benefit;
5744 v->location = location;
5745 v->cant_derive = 0;
5746 v->combined_with = 0;
5747 v->maybe_multiple = maybe_multiple;
5748 v->maybe_dead = 0;
5749 v->derive_adjustment = 0;
5750 v->same = 0;
5751 v->ignore = 0;
5752 v->new_reg = 0;
5753 v->final_value = 0;
5754 v->same_insn = 0;
5755 v->auto_inc_opt = 0;
5756 v->unrolled = 0;
5757 v->shared = 0;
5759 /* The v->always_computable field is used in update_giv_derive, to
5760 determine whether a giv can be used to derive another giv. For a
5761 DEST_REG giv, INSN computes a new value for the giv, so its value
5762 isn't computable if INSN isn't executed every iteration.
5763 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5764 it does not compute a new value. Hence the value is always computable
5765 regardless of whether INSN is executed each iteration. */
5767 if (type == DEST_ADDR)
5768 v->always_computable = 1;
5769 else
5770 v->always_computable = ! not_every_iteration;
5772 v->always_executed = ! not_every_iteration;
5774 if (type == DEST_ADDR)
5776 v->mode = GET_MODE (*location);
5777 v->lifetime = 1;
5779 else /* type == DEST_REG */
5781 v->mode = GET_MODE (SET_DEST (set));
5783 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5785 /* If the lifetime is zero, it means that this register is
5786 really a dead store. So mark this as a giv that can be
5787 ignored. This will not prevent the biv from being eliminated. */
5788 if (v->lifetime == 0)
5789 v->ignore = 1;
5791 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5792 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5795 /* Add the giv to the class of givs computed from one biv. */
5797 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5798 if (bl)
5800 v->next_iv = bl->giv;
5801 bl->giv = v;
5802 /* Don't count DEST_ADDR. This is supposed to count the number of
5803 insns that calculate givs. */
5804 if (type == DEST_REG)
5805 bl->giv_count++;
5806 bl->total_benefit += benefit;
5808 else
5809 /* Fatal error, biv missing for this giv? */
5810 abort ();
5812 if (type == DEST_ADDR)
5813 v->replaceable = 1;
5814 else
5816 /* The giv can be replaced outright by the reduced register only if all
5817 of the following conditions are true:
5818 - the insn that sets the giv is always executed on any iteration
5819 on which the giv is used at all
5820 (there are two ways to deduce this:
5821 either the insn is executed on every iteration,
5822 or all uses follow that insn in the same basic block),
5823 - the giv is not used outside the loop
5824 - no assignments to the biv occur during the giv's lifetime. */
5826 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5827 /* Previous line always fails if INSN was moved by loop opt. */
5828 && REGNO_LAST_LUID (REGNO (dest_reg))
5829 < INSN_LUID (loop->end)
5830 && (! not_every_iteration
5831 || last_use_this_basic_block (dest_reg, insn)))
5833 /* Now check that there are no assignments to the biv within the
5834 giv's lifetime. This requires two separate checks. */
5836 /* Check each biv update, and fail if any are between the first
5837 and last use of the giv.
5839 If this loop contains an inner loop that was unrolled, then
5840 the insn modifying the biv may have been emitted by the loop
5841 unrolling code, and hence does not have a valid luid. Just
5842 mark the biv as not replaceable in this case. It is not very
5843 useful as a biv, because it is used in two different loops.
5844 It is very unlikely that we would be able to optimize the giv
5845 using this biv anyways. */
5847 v->replaceable = 1;
5848 for (b = bl->biv; b; b = b->next_iv)
5850 if (INSN_UID (b->insn) >= max_uid_for_loop
5851 || ((INSN_LUID (b->insn)
5852 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5853 && (INSN_LUID (b->insn)
5854 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5856 v->replaceable = 0;
5857 v->not_replaceable = 1;
5858 break;
5862 /* If there are any backwards branches that go from after the
5863 biv update to before it, then this giv is not replaceable. */
5864 if (v->replaceable)
5865 for (b = bl->biv; b; b = b->next_iv)
5866 if (back_branch_in_range_p (loop, b->insn))
5868 v->replaceable = 0;
5869 v->not_replaceable = 1;
5870 break;
5873 else
5875 /* May still be replaceable, we don't have enough info here to
5876 decide. */
5877 v->replaceable = 0;
5878 v->not_replaceable = 0;
5882 /* Record whether the add_val contains a const_int, for later use by
5883 combine_givs. */
5885 rtx tem = add_val;
5887 v->no_const_addval = 1;
5888 if (tem == const0_rtx)
5889 ;
5890 else if (CONSTANT_P (add_val))
5891 v->no_const_addval = 0;
5892 if (GET_CODE (tem) == PLUS)
5894 while (1)
5896 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5897 tem = XEXP (tem, 0);
5898 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5899 tem = XEXP (tem, 1);
5900 else
5901 break;
5903 if (CONSTANT_P (XEXP (tem, 1)))
5904 v->no_const_addval = 0;
5908 if (loop_dump_stream)
5909 loop_giv_dump (v, loop_dump_stream, 0);
5912 /* All this does is determine whether a giv can be made replaceable because
5913 its final value can be calculated. This code can not be part of record_giv
5914 above, because final_giv_value requires that the number of loop iterations
5915 be known, and that can not be accurately calculated until after all givs
5916 have been identified. */
5918 static void
5919 check_final_value (loop, v)
5920 const struct loop *loop;
5921 struct induction *v;
5923 struct loop_ivs *ivs = LOOP_IVS (loop);
5924 struct iv_class *bl;
5925 rtx final_value = 0;
5927 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5929 /* DEST_ADDR givs will never reach here, because they are always marked
5930 replaceable above in record_giv. */
5932 /* The giv can be replaced outright by the reduced register only if all
5933 of the following conditions are true:
5934 - the insn that sets the giv is always executed on any iteration
5935 on which the giv is used at all
5936 (there are two ways to deduce this:
5937 either the insn is executed on every iteration,
5938 or all uses follow that insn in the same basic block),
5939 - its final value can be calculated (this condition is different
5940 than the one above in record_giv)
5941 - it's not used before it is set
5942 - no assignments to the biv occur during the giv's lifetime. */
5944 #if 0
5945 /* This is only called now when replaceable is known to be false. */
5946 /* Clear replaceable, so that it won't confuse final_giv_value. */
5947 v->replaceable = 0;
5948 #endif
5950 if ((final_value = final_giv_value (loop, v))
5951 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5953 int biv_increment_seen = 0, before_giv_insn = 0;
5954 rtx p = v->insn;
5955 rtx last_giv_use;
5957 v->replaceable = 1;
5959 /* When trying to determine whether or not a biv increment occurs
5960 during the lifetime of the giv, we can ignore uses of the variable
5961 outside the loop because final_value is true. Hence we can not
5962 use regno_last_uid and regno_first_uid as above in record_giv. */
5964 /* Search the loop to determine whether any assignments to the
5965 biv occur during the giv's lifetime. Start with the insn
5966 that sets the giv, and search around the loop until we come
5967 back to that insn again.
5969 Also fail if there is a jump within the giv's lifetime that jumps
5970 to somewhere outside the lifetime but still within the loop. This
5971 catches spaghetti code where the execution order is not linear, and
5972 hence the above test fails. Here we assume that the giv lifetime
5973 does not extend from one iteration of the loop to the next, so as
5974 to make the test easier. Since the lifetime isn't known yet,
5975 this requires two loops. See also record_giv above. */
5977 last_giv_use = v->insn;
5979 while (1)
5981 p = NEXT_INSN (p);
5982 if (p == loop->end)
5984 before_giv_insn = 1;
5985 p = NEXT_INSN (loop->start);
5987 if (p == v->insn)
5988 break;
5990 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5991 || GET_CODE (p) == CALL_INSN)
5993 /* It is possible for the BIV increment to use the GIV if we
5994 have a cycle. Thus we must be sure to check each insn for
5995 both BIV and GIV uses, and we must check for BIV uses
5996 first. */
5998 if (! biv_increment_seen
5999 && reg_set_p (v->src_reg, PATTERN (p)))
6000 biv_increment_seen = 1;
6002 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6004 if (biv_increment_seen || before_giv_insn)
6006 v->replaceable = 0;
6007 v->not_replaceable = 1;
6008 break;
6010 last_giv_use = p;
6015 /* Now that the lifetime of the giv is known, check for branches
6016 from within the lifetime to outside the lifetime if it is still
6017 replaceable. */
6019 if (v->replaceable)
6021 p = v->insn;
6022 while (1)
6024 p = NEXT_INSN (p);
6025 if (p == loop->end)
6026 p = NEXT_INSN (loop->start);
6027 if (p == last_giv_use)
6028 break;
6030 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6031 && LABEL_NAME (JUMP_LABEL (p))
6032 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6033 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6034 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6035 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6037 v->replaceable = 0;
6038 v->not_replaceable = 1;
6040 if (loop_dump_stream)
6041 fprintf (loop_dump_stream,
6042 "Found branch outside giv lifetime.\n");
6044 break;
6049 /* If it is replaceable, then save the final value. */
6050 if (v->replaceable)
6051 v->final_value = final_value;
6054 if (loop_dump_stream && v->replaceable)
6055 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6056 INSN_UID (v->insn), REGNO (v->dest_reg));
6059 /* Update the status of whether a giv can derive other givs.
6061 We need to do something special if there is or may be an update to the biv
6062 between the time the giv is defined and the time it is used to derive
6063 another giv.
6065 In addition, a giv that is only conditionally set is not allowed to
6066 derive another giv once a label has been passed.
6068 The cases we look at are when a label or an update to a biv is passed. */
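/* A worked example of the adjustment (hypothetical values): if giv g was
   defined as g = 3*b + 7 and the biv update `b = b + 2' occurs between
   g's definition and a later use of g to derive another giv, the derived
   giv must be compensated by 3*2 == 6, which is computed below as the
   product of biv->add_val and giv->mult_val.  */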
6070 static void
6071 update_giv_derive (loop, p)
6072 const struct loop *loop;
6073 rtx p;
6075 struct loop_ivs *ivs = LOOP_IVS (loop);
6076 struct iv_class *bl;
6077 struct induction *biv, *giv;
6078 rtx tem;
6079 int dummy;
6081 /* Search all IV classes, then all bivs, and finally all givs.
6083 There are three cases we are concerned with. First we have the situation
6084 of a giv that is only updated conditionally. In that case, it may not
6085 derive any givs after a label is passed.
6087 The second case is when a biv update occurs, or may occur, after the
6088 definition of a giv. For certain biv updates (see below) that are
6089 known to occur between the giv definition and use, we can adjust the
6090 giv definition. For others, or when the biv update is conditional,
6091 we must prevent the giv from deriving any other givs. There are two
6092 sub-cases within this case.
6094 If this is a label, we are concerned with any biv update that is done
6095 conditionally, since it may be done after the giv is defined followed by
6096 a branch here (actually, we need to pass both a jump and a label, but
6097 this extra tracking doesn't seem worth it).
6099 If this is a jump, we are concerned about any biv update that may be
6100 executed multiple times. We are actually only concerned about
6101 backward jumps, but it is probably not worth performing the test
6102 on the jump again here.
6104 If this is a biv update, we must adjust the giv status to show that a
6105 subsequent biv update was performed. If this adjustment cannot be done,
6106 the giv cannot derive further givs. */
6108 for (bl = ivs->list; bl; bl = bl->next)
6109 for (biv = bl->biv; biv; biv = biv->next_iv)
6110 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6111 || biv->insn == p)
6113 for (giv = bl->giv; giv; giv = giv->next_iv)
6115 /* If cant_derive is already true, there is no point in
6116 checking all of these conditions again. */
6117 if (giv->cant_derive)
6118 continue;
6120 /* If this giv is conditionally set and we have passed a label,
6121 it cannot derive anything. */
6122 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6123 giv->cant_derive = 1;
6125 /* Skip givs that have mult_val == 0, since
6126 they are really invariants. Also skip those that are
6127 replaceable, since we know their lifetime doesn't contain
6128 any biv update. */
6129 else if (giv->mult_val == const0_rtx || giv->replaceable)
6130 continue;
6132 /* The only way we can allow this giv to derive another
6133 is if this is a biv increment and we can form the product
6134 of biv->add_val and giv->mult_val. In this case, we will
6135 be able to compute a compensation. */
6136 else if (biv->insn == p)
6138 rtx ext_val_dummy;
6140 tem = 0;
6141 if (biv->mult_val == const1_rtx)
6142 tem = simplify_giv_expr (loop,
6143 gen_rtx_MULT (giv->mode,
6144 biv->add_val,
6145 giv->mult_val),
6146 &ext_val_dummy, &dummy);
6148 if (tem && giv->derive_adjustment)
6149 tem = simplify_giv_expr
6150 (loop,
6151 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6152 &ext_val_dummy, &dummy);
6154 if (tem)
6155 giv->derive_adjustment = tem;
6156 else
6157 giv->cant_derive = 1;
6159 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6160 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6161 giv->cant_derive = 1;
6166 /* Check whether an insn is an increment legitimate for a basic induction var.
6167 X is the source of insn P, or a part of it.
6168 MODE is the mode in which X should be interpreted.
6170 DEST_REG is the putative biv, also the destination of the insn.
6171 We accept patterns of these forms:
6172 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6173 REG = INVARIANT + REG
6175 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6176 store the additive term into *INC_VAL, and store the place where
6177 we found the additive term into *LOCATION.
6179 If X is an assignment of an invariant into DEST_REG, we set
6180 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6182 We also want to detect a BIV when it corresponds to a variable
6183 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6184 of the variable may be a PLUS that adds a SUBREG of that variable to
6185 an invariant and then sign- or zero-extends the result of the PLUS
6186 into the variable.
6188 Most GIVs in such cases will be in the promoted mode, since that is
6189 probably the natural computation mode (and almost certainly the mode
6190 used for addresses) on the machine. So we view the pseudo-reg containing
6191 the variable as the BIV, as if it were simply incremented.
6193 Note that treating the entire pseudo as a BIV will result in making
6194 simple increments to any GIVs based on it. However, if the variable
6195 overflows in its declared mode but not its promoted mode, the result will
6196 be incorrect. This is acceptable if the variable is signed, since
6197 overflows in such cases are undefined, but not if it is unsigned, since
6198 those overflows are defined. So we only check for SIGN_EXTEND and
6199 not ZERO_EXTEND.
6201 If we cannot find a biv, we return 0. */
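/* For instance (illustrative RTL, with reg 100 the putative biv), the
   increment

   (set (reg 100) (plus:SI (reg 100) (const_int 4)))

   matches the first pattern above: we return 1 with *MULT_VAL set to
   const1_rtx, *INC_VAL set to (const_int 4), and *LOCATION pointing at
   the second operand of the PLUS.  */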
6203 static int
6204 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6205 const struct loop *loop;
6206 rtx x;
6207 enum machine_mode mode;
6208 rtx dest_reg;
6209 rtx p;
6210 rtx *inc_val;
6211 rtx *mult_val;
6212 rtx **location;
6214 enum rtx_code code;
6215 rtx *argp, arg;
6216 rtx insn, set = 0;
6218 code = GET_CODE (x);
6219 *location = NULL;
6220 switch (code)
6222 case PLUS:
6223 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6224 || (GET_CODE (XEXP (x, 0)) == SUBREG
6225 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6226 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6228 argp = &XEXP (x, 1);
6230 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6231 || (GET_CODE (XEXP (x, 1)) == SUBREG
6232 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6233 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6235 argp = &XEXP (x, 0);
6237 else
6238 return 0;
6240 arg = *argp;
6241 if (loop_invariant_p (loop, arg) != 1)
6242 return 0;
6244 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6245 *mult_val = const1_rtx;
6246 *location = argp;
6247 return 1;
6249 case SUBREG:
6250 /* If what's inside the SUBREG is a BIV, then look through the SUBREG.
6251 This will handle addition of promoted variables.
6252 ??? The comment at the start of this function is wrong: promoted
6253 variable increments don't look like it says they do. */
6254 return basic_induction_var (loop, SUBREG_REG (x),
6255 GET_MODE (SUBREG_REG (x)),
6256 dest_reg, p, inc_val, mult_val, location);
6258 case REG:
6259 /* If this register is assigned in a previous insn, look at its
6260 source, but don't go outside the loop or past a label. */
6262 /* If this sets a register to itself, we would repeat any previous
6263 biv increment if we applied this strategy blindly. */
6264 if (rtx_equal_p (dest_reg, x))
6265 return 0;
6267 insn = p;
6268 while (1)
6270 rtx dest;
6271 do
6273 insn = PREV_INSN (insn);
6275 while (insn && GET_CODE (insn) == NOTE
6276 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6278 if (!insn)
6279 break;
6280 set = single_set (insn);
6281 if (set == 0)
6282 break;
6283 dest = SET_DEST (set);
6284 if (dest == x
6285 || (GET_CODE (dest) == SUBREG
6286 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6287 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6288 && SUBREG_REG (dest) == x))
6289 return basic_induction_var (loop, SET_SRC (set),
6290 (GET_MODE (SET_SRC (set)) == VOIDmode
6291 ? GET_MODE (x)
6292 : GET_MODE (SET_SRC (set))),
6293 dest_reg, insn,
6294 inc_val, mult_val, location);
6296 while (GET_CODE (dest) == SIGN_EXTRACT
6297 || GET_CODE (dest) == ZERO_EXTRACT
6298 || GET_CODE (dest) == SUBREG
6299 || GET_CODE (dest) == STRICT_LOW_PART)
6300 dest = XEXP (dest, 0);
6301 if (dest == x)
6302 break;
6304 /* Fall through. */
6306 /* Can accept constant setting of biv only when inside innermost loop.
6307 Otherwise, a biv of an inner loop may be incorrectly recognized
6308 as a biv of the outer loop,
6309 causing code to be moved INTO the inner loop. */
6310 case MEM:
6311 if (loop_invariant_p (loop, x) != 1)
6312 return 0;
6313 case CONST_INT:
6314 case SYMBOL_REF:
6315 case CONST:
6316 /* convert_modes aborts if we try to convert to or from CCmode, so just
6317 exclude that case. It is very unlikely that a condition code value
6318 would be a useful iterator anyways. convert_modes aborts if we try to
6319 convert a float mode to non-float or vice versa too. */
6320 if (loop->level == 1
6321 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6322 && GET_MODE_CLASS (mode) != MODE_CC)
6324 /* Possible bug here? Perhaps we don't know the mode of X. */
6325 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6326 *mult_val = const0_rtx;
6327 return 1;
6329 else
6330 return 0;
6332 case SIGN_EXTEND:
6333 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6334 dest_reg, p, inc_val, mult_val, location);
6336 case ASHIFTRT:
6337 /* Similar, since this can be a sign extension. */
6338 for (insn = PREV_INSN (p);
6339 (insn && GET_CODE (insn) == NOTE
6340 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6341 insn = PREV_INSN (insn))
6342 ;
6344 if (insn)
6345 set = single_set (insn);
6347 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6348 && set && SET_DEST (set) == XEXP (x, 0)
6349 && GET_CODE (XEXP (x, 1)) == CONST_INT
6350 && INTVAL (XEXP (x, 1)) >= 0
6351 && GET_CODE (SET_SRC (set)) == ASHIFT
6352 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6353 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6354 GET_MODE (XEXP (x, 0)),
6355 dest_reg, insn, inc_val, mult_val,
6356 location);
6357 return 0;
6359 default:
6360 return 0;
6364 /* A general induction variable (giv) is any quantity that is a linear
6365 function of a basic induction variable,
6366 i.e. giv = biv * mult_val + add_val.
6367 The coefficients can be any loop invariant quantity.
6368 A giv need not be computed directly from the biv;
6369 it can be computed by way of other givs. */
6371 /* Determine whether X computes a giv.
6372 If it does, return nonzero and store the benefit
6373 from eliminating the computation of X in *PBENEFIT;
6374 set *SRC_REG to the register of the biv that it is computed from;
6375 set *ADD_VAL and *MULT_VAL to the coefficients,
6376 such that the value of X is biv * mult + add; */
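/* For example (an illustrative sketch), if X is

   (plus:SI (mult:SI (reg 100) (const_int 4)) (const_int 8))

   and reg 100 is a biv, we return nonzero with *SRC_REG == (reg 100),
   *MULT_VAL == (const_int 4) and *ADD_VAL == (const_int 8).  */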
6378 static int
6379 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6380 is_addr, pbenefit, addr_mode)
6381 const struct loop *loop;
6382 rtx x;
6383 rtx *src_reg;
6384 rtx *add_val;
6385 rtx *mult_val;
6386 rtx *ext_val;
6387 int is_addr;
6388 int *pbenefit;
6389 enum machine_mode addr_mode;
6391 struct loop_ivs *ivs = LOOP_IVS (loop);
6392 rtx orig_x = x;
6394 /* If this is an invariant, forget it, it isn't a giv. */
6395 if (loop_invariant_p (loop, x) == 1)
6396 return 0;
6398 *pbenefit = 0;
6399 *ext_val = NULL_RTX;
6400 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6401 if (x == 0)
6402 return 0;
6404 switch (GET_CODE (x))
6406 case USE:
6407 case CONST_INT:
6408 /* Since this is now an invariant and wasn't before, it must be a giv
6409 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6410 with. */
6411 *src_reg = ivs->list->biv->dest_reg;
6412 *mult_val = const0_rtx;
6413 *add_val = x;
6414 break;
6416 case REG:
6417 /* This is equivalent to a BIV. */
6418 *src_reg = x;
6419 *mult_val = const1_rtx;
6420 *add_val = const0_rtx;
6421 break;
6423 case PLUS:
6424 /* Either (plus (biv) (invar)) or
6425 (plus (mult (biv) (invar_1)) (invar_2)). */
6426 if (GET_CODE (XEXP (x, 0)) == MULT)
6428 *src_reg = XEXP (XEXP (x, 0), 0);
6429 *mult_val = XEXP (XEXP (x, 0), 1);
6431 else
6433 *src_reg = XEXP (x, 0);
6434 *mult_val = const1_rtx;
6436 *add_val = XEXP (x, 1);
6437 break;
6439 case MULT:
6440 /* ADD_VAL is zero. */
6441 *src_reg = XEXP (x, 0);
6442 *mult_val = XEXP (x, 1);
6443 *add_val = const0_rtx;
6444 break;
6446 default:
6447 abort ();
6450 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6451 one unless they are CONST_INT). */
6452 if (GET_CODE (*add_val) == USE)
6453 *add_val = XEXP (*add_val, 0);
6454 if (GET_CODE (*mult_val) == USE)
6455 *mult_val = XEXP (*mult_val, 0);
6457 if (is_addr)
6458 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6459 else
6460 *pbenefit += rtx_cost (orig_x, SET);
6462 /* Always return true if this is a giv so it will be detected as such,
6463 even if the benefit is zero or negative. This allows elimination
6464 of bivs that might otherwise not be eliminated. */
6465 return 1;
6468 /* Given an expression, X, try to form it as a linear function of a biv.
6469 We will canonicalize it to be of the form
6470 (plus (mult (BIV) (invar_1))
6471 (invar_2))
6472 with possible degeneracies.
6474 The invariant expressions must each be of a form that can be used as a
6475 machine operand. We surround them with a USE rtx (a hack, but localized
6476 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6477 routine; it is the caller's responsibility to strip them.
6479 If no such canonicalization is possible (i.e., two biv's are used or an
6480 expression that is neither invariant nor a biv or giv), this routine
6481 returns 0.
6483 For a non-zero return, the result will have a code of CONST_INT, USE,
6484 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6486 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
6488 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6489 static rtx sge_plus_constant PARAMS ((rtx, rtx));
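/* As a concrete illustration (with `b' a hypothetical biv register), the
   expression (mult (plus b (const_int 1)) (const_int 4)) canonicalizes by
   distribution to

   (plus (mult b (const_int 4)) (const_int 4))

   which has exactly the form shown above.  */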
6491 static rtx
6492 simplify_giv_expr (loop, x, ext_val, benefit)
6493 const struct loop *loop;
6494 rtx x;
6495 rtx *ext_val;
6496 int *benefit;
6498 struct loop_ivs *ivs = LOOP_IVS (loop);
6499 struct loop_regs *regs = LOOP_REGS (loop);
6500 enum machine_mode mode = GET_MODE (x);
6501 rtx arg0, arg1;
6502 rtx tem;
6504 /* If this is not an integer mode, or if we cannot do arithmetic in this
6505 mode, this can't be a giv. */
6506 if (mode != VOIDmode
6507 && (GET_MODE_CLASS (mode) != MODE_INT
6508 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6509 return NULL_RTX;
6511 switch (GET_CODE (x))
6513 case PLUS:
6514 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6515 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6516 if (arg0 == 0 || arg1 == 0)
6517 return NULL_RTX;
6519 /* Put constant last, CONST_INT last if both constant. */
6520 if ((GET_CODE (arg0) == USE
6521 || GET_CODE (arg0) == CONST_INT)
6522 && ! ((GET_CODE (arg0) == USE
6523 && GET_CODE (arg1) == USE)
6524 || GET_CODE (arg1) == CONST_INT))
6525 tem = arg0, arg0 = arg1, arg1 = tem;
6527 /* Handle addition of zero, then addition of an invariant. */
6528 if (arg1 == const0_rtx)
6529 return arg0;
6530 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6531 switch (GET_CODE (arg0))
6533 case CONST_INT:
6534 case USE:
6535 /* Adding two invariants must result in an invariant, so enclose
6536 addition operation inside a USE and return it. */
6537 if (GET_CODE (arg0) == USE)
6538 arg0 = XEXP (arg0, 0);
6539 if (GET_CODE (arg1) == USE)
6540 arg1 = XEXP (arg1, 0);
6542 if (GET_CODE (arg0) == CONST_INT)
6543 tem = arg0, arg0 = arg1, arg1 = tem;
6544 if (GET_CODE (arg1) == CONST_INT)
6545 tem = sge_plus_constant (arg0, arg1);
6546 else
6547 tem = sge_plus (mode, arg0, arg1);
6549 if (GET_CODE (tem) != CONST_INT)
6550 tem = gen_rtx_USE (mode, tem);
6551 return tem;
6553 case REG:
6554 case MULT:
6555 /* biv + invar or mult + invar. Return sum. */
6556 return gen_rtx_PLUS (mode, arg0, arg1);
6558 case PLUS:
6559 /* (a + invar_1) + invar_2. Associate. */
6560 return
6561 simplify_giv_expr (loop,
6562 gen_rtx_PLUS (mode,
6563 XEXP (arg0, 0),
6564 gen_rtx_PLUS (mode,
6565 XEXP (arg0, 1),
6566 arg1)),
6567 ext_val, benefit);
6569 default:
6570 abort ();
6573 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6574 MULT to reduce cases. */
6575 if (GET_CODE (arg0) == REG)
6576 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6577 if (GET_CODE (arg1) == REG)
6578 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6580 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6581 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6582 Recurse to associate the second PLUS. */
6583 if (GET_CODE (arg1) == MULT)
6584 tem = arg0, arg0 = arg1, arg1 = tem;
6586 if (GET_CODE (arg1) == PLUS)
6587 return
6588 simplify_giv_expr (loop,
6589 gen_rtx_PLUS (mode,
6590 gen_rtx_PLUS (mode, arg0,
6591 XEXP (arg1, 0)),
6592 XEXP (arg1, 1)),
6593 ext_val, benefit);
6595 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6596 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6597 return NULL_RTX;
6599 if (!rtx_equal_p (arg0, arg1))
6600 return NULL_RTX;
6602 return simplify_giv_expr (loop,
6603 gen_rtx_MULT (mode,
6604 XEXP (arg0, 0),
6605 gen_rtx_PLUS (mode,
6606 XEXP (arg0, 1),
6607 XEXP (arg1, 1))),
6608 ext_val, benefit);
6610 case MINUS:
6611 /* Handle "a - b" as "a + b * (-1)". */
6612 return simplify_giv_expr (loop,
6613 gen_rtx_PLUS (mode,
6614 XEXP (x, 0),
6615 gen_rtx_MULT (mode,
6616 XEXP (x, 1),
6617 constm1_rtx)),
6618 ext_val, benefit);
6620 case MULT:
6621 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6622 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6623 if (arg0 == 0 || arg1 == 0)
6624 return NULL_RTX;
6626 /* Put constant last, CONST_INT last if both constant. */
6627 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6628 && GET_CODE (arg1) != CONST_INT)
6629 tem = arg0, arg0 = arg1, arg1 = tem;
6631 /* If second argument is not now constant, not giv. */
6632 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6633 return NULL_RTX;
6635 /* Handle multiply by 0 or 1. */
6636 if (arg1 == const0_rtx)
6637 return const0_rtx;
6639 else if (arg1 == const1_rtx)
6640 return arg0;
6642 switch (GET_CODE (arg0))
6644 case REG:
6645 /* biv * invar. Done. */
6646 return gen_rtx_MULT (mode, arg0, arg1);
6648 case CONST_INT:
6649 /* Product of two constants. */
6650 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6652 case USE:
6653 /* invar * invar is a giv, but attempt to simplify it somehow. */
6654 if (GET_CODE (arg1) != CONST_INT)
6655 return NULL_RTX;
6657 arg0 = XEXP (arg0, 0);
6658 if (GET_CODE (arg0) == MULT)
6660 /* (invar_0 * invar_1) * invar_2. Associate. */
6661 return simplify_giv_expr (loop,
6662 gen_rtx_MULT (mode,
6663 XEXP (arg0, 0),
6664 gen_rtx_MULT (mode,
6665 XEXP (arg0,
6666 1),
6667 arg1)),
6668 ext_val, benefit);
6670 /* Propagate the MULT expressions to the innermost nodes. */
6671 else if (GET_CODE (arg0) == PLUS)
6673 /* (invar_0 + invar_1) * invar_2. Distribute. */
6674 return simplify_giv_expr (loop,
6675 gen_rtx_PLUS (mode,
6676 gen_rtx_MULT (mode,
6677 XEXP (arg0,
6678 0),
6679 arg1),
6680 gen_rtx_MULT (mode,
6681 XEXP (arg0,
6682 1),
6683 arg1)),
6684 ext_val, benefit);
6686 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6688 case MULT:
6689 /* (a * invar_1) * invar_2. Associate. */
6690 return simplify_giv_expr (loop,
6691 gen_rtx_MULT (mode,
6692 XEXP (arg0, 0),
6693 gen_rtx_MULT (mode,
6694 XEXP (arg0, 1),
6695 arg1)),
6696 ext_val, benefit);
6698 case PLUS:
6699 /* (a + invar_1) * invar_2. Distribute. */
6700 return simplify_giv_expr (loop,
6701 gen_rtx_PLUS (mode,
6702 gen_rtx_MULT (mode,
6703 XEXP (arg0, 0),
6704 arg1),
6705 gen_rtx_MULT (mode,
6706 XEXP (arg0, 1),
6707 arg1)),
6708 ext_val, benefit);
6710 default:
6711 abort ();
6714 case ASHIFT:
6715 /* Shift by constant is multiply by power of two. */
6716 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6717 return 0;
6719 return
6720 simplify_giv_expr (loop,
6721 gen_rtx_MULT (mode,
6722 XEXP (x, 0),
6723 GEN_INT ((HOST_WIDE_INT) 1
6724 << INTVAL (XEXP (x, 1)))),
6725 ext_val, benefit);
6727 case NEG:
6728 /* "-a" is "a * (-1)" */
6729 return simplify_giv_expr (loop,
6730 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6731 ext_val, benefit);
6733 case NOT:
6734 /* "~a" is "-a - 1". Silly, but easy. */
6735 return simplify_giv_expr (loop,
6736 gen_rtx_MINUS (mode,
6737 gen_rtx_NEG (mode, XEXP (x, 0)),
6738 const1_rtx),
6739 ext_val, benefit);
6741 case USE:
6742 /* Already in proper form for invariant. */
6743 return x;
6745 case SIGN_EXTEND:
6746 case ZERO_EXTEND:
6747 case TRUNCATE:
6748 /* Conditionally recognize extensions of simple IVs. After we've
6749 computed loop traversal counts and verified the range of the
6750 source IV, we'll reevaluate this as a GIV. */
6751 if (*ext_val == NULL_RTX)
6753 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6754 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6756 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6757 return arg0;
6760 goto do_default;
6762 case REG:
6763 /* If this is a new register, we can't deal with it. */
6764 if (REGNO (x) >= max_reg_before_loop)
6765 return 0;
6767 /* Check for biv or giv. */
6768 switch (REG_IV_TYPE (ivs, REGNO (x)))
6770 case BASIC_INDUCT:
6771 return x;
6772 case GENERAL_INDUCT:
6774 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6776 /* Form expression from giv and add benefit. Ensure this giv
6777 can derive another and subtract any needed adjustment if so. */
6779 /* Increasing the benefit here is risky. The only case in which it
6780 is arguably correct is if this is the only use of V. In other
6781 cases, this will artificially inflate the benefit of the current
6782 giv, and lead to suboptimal code. Thus, it is disabled, since
6783 potentially not reducing an only marginally beneficial giv is
6784 less harmful than reducing many givs that are not really
6785 beneficial. */
6787 rtx single_use = regs->array[REGNO (x)].single_usage;
6788 if (single_use && single_use != const0_rtx)
6789 *benefit += v->benefit;
6792 if (v->cant_derive)
6793 return 0;
6795 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6796 v->src_reg, v->mult_val),
6797 v->add_val);
6799 if (v->derive_adjustment)
6800 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6801 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6802 if (*ext_val)
6804 if (!v->ext_dependent)
6805 return arg0;
6807 else
6809 *ext_val = v->ext_dependent;
6810 return arg0;
6812 return 0;
6815 default:
6816 do_default:
6817 /* If it isn't an induction variable, and it is invariant, we
6818 may be able to simplify things further by looking through
6819 the bits we just moved outside the loop. */
6820 if (loop_invariant_p (loop, x) == 1)
6822 struct movable *m;
6823 struct loop_movables *movables = LOOP_MOVABLES (loop);
6825 for (m = movables->head; m; m = m->next)
6826 if (rtx_equal_p (x, m->set_dest))
6828 /* Ok, we found a match. Substitute and simplify. */
6830 /* If we match another movable, we must use that, as
6831 this one is going away. */
6832 if (m->match)
6833 return simplify_giv_expr (loop, m->match->set_dest,
6834 ext_val, benefit);
6836 /* If consec is non-zero, this is a member of a group of
6837 instructions that were moved together. We handle this
6838 case only to the point of seeking to the last insn and
6839 looking for a REG_EQUAL. Fail if we don't find one. */
6840 if (m->consec != 0)
6842 int i = m->consec;
6843 tem = m->insn;
6844 do
6846 tem = NEXT_INSN (tem);
6848 while (--i > 0);
6850 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6851 if (tem)
6852 tem = XEXP (tem, 0);
6854 else
6856 tem = single_set (m->insn);
6857 if (tem)
6858 tem = SET_SRC (tem);
6861 if (tem)
6863 /* What we are most interested in is pointer
6864 arithmetic on invariants -- only take
6865 patterns we may be able to do something with. */
6866 if (GET_CODE (tem) == PLUS
6867 || GET_CODE (tem) == MULT
6868 || GET_CODE (tem) == ASHIFT
6869 || GET_CODE (tem) == CONST_INT
6870 || GET_CODE (tem) == SYMBOL_REF)
6872 tem = simplify_giv_expr (loop, tem, ext_val,
6873 benefit);
6874 if (tem)
6875 return tem;
6877 else if (GET_CODE (tem) == CONST
6878 && GET_CODE (XEXP (tem, 0)) == PLUS
6879 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6880 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6882 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6883 ext_val, benefit);
6884 if (tem)
6885 return tem;
6888 break;
6891 break;
6894 /* Fall through to general case. */
6895 default:
6896 /* If invariant, return as USE (unless CONST_INT).
6897 Otherwise, not giv. */
6898 if (GET_CODE (x) == USE)
6899 x = XEXP (x, 0);
6901 if (loop_invariant_p (loop, x) == 1)
6903 if (GET_CODE (x) == CONST_INT)
6904 return x;
6905 if (GET_CODE (x) == CONST
6906 && GET_CODE (XEXP (x, 0)) == PLUS
6907 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6908 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6909 x = XEXP (x, 0);
6910 return gen_rtx_USE (mode, x);
6912 else
6913 return 0;
6917 /* This routine folds invariants such that there is only ever one
6918 CONST_INT in the summation. It is only used by simplify_giv_expr. */
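/* E.g. (illustrative), folding C == (const_int 6) into X ==
   (plus (reg 100) (const_int 4)) yields (plus (reg 100) (const_int 10)),
   rather than stacking a second CONST_INT onto the sum.  */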
6920 static rtx
6921 sge_plus_constant (x, c)
6922 rtx x, c;
6924 if (GET_CODE (x) == CONST_INT)
6925 return GEN_INT (INTVAL (x) + INTVAL (c));
6926 else if (GET_CODE (x) != PLUS)
6927 return gen_rtx_PLUS (GET_MODE (x), x, c);
6928 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6930 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6931 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6933 else if (GET_CODE (XEXP (x, 0)) == PLUS
6934 || GET_CODE (XEXP (x, 1)) != PLUS)
6936 return gen_rtx_PLUS (GET_MODE (x),
6937 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6939 else
6941 return gen_rtx_PLUS (GET_MODE (x),
6942 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6946 static rtx
6947 sge_plus (mode, x, y)
6948 enum machine_mode mode;
6949 rtx x, y;
6951 while (GET_CODE (y) == PLUS)
6953 rtx a = XEXP (y, 0);
6954 if (GET_CODE (a) == CONST_INT)
6955 x = sge_plus_constant (x, a);
6956 else
6957 x = gen_rtx_PLUS (mode, x, a);
6958 y = XEXP (y, 1);
6960 if (GET_CODE (y) == CONST_INT)
6961 x = sge_plus_constant (x, y);
6962 else
6963 x = gen_rtx_PLUS (mode, x, y);
6964 return x;
6967 /* Help detect a giv that is calculated by several consecutive insns;
6968 for example,
6969 giv = biv * M
6970 giv = giv + A
6971 The caller has already identified the first insn P as having a giv as dest;
6972 we check that all other insns that set the same register follow
6973 immediately after P, that they alter nothing else,
6974 and that the result of the last is still a giv.
6976 The value is 0 if the reg set in P is not really a giv.
6977 Otherwise, the value is the amount gained by eliminating
6978 all the consecutive insns that compute the value.
6980 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6981 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6983 The coefficients of the ultimate giv value are stored in
6984 *MULT_VAL and *ADD_VAL. */
6986 static int
6987 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6988 add_val, mult_val, ext_val, last_consec_insn)
6989 const struct loop *loop;
6990 int first_benefit;
6991 rtx p;
6992 rtx src_reg;
6993 rtx dest_reg;
6994 rtx *add_val;
6995 rtx *mult_val;
6996 rtx *ext_val;
6997 rtx *last_consec_insn;
6999 struct loop_ivs *ivs = LOOP_IVS (loop);
7000 struct loop_regs *regs = LOOP_REGS (loop);
7001 int count;
7002 enum rtx_code code;
7003 int benefit;
7004 rtx temp;
7005 rtx set;
7007 /* Indicate that this is a giv so that we can update the value produced in
7008 each insn of the multi-insn sequence.
7010 This induction structure will be used only by the call to
7011 general_induction_var below, so we can allocate it on our stack.
7012 If this is a giv, our caller will replace the induct var entry with
7013 a new induction structure. */
7014 struct induction *v;
7016 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7017 return 0;
7019 v = (struct induction *) alloca (sizeof (struct induction));
7020 v->src_reg = src_reg;
7021 v->mult_val = *mult_val;
7022 v->add_val = *add_val;
7023 v->benefit = first_benefit;
7024 v->cant_derive = 0;
7025 v->derive_adjustment = 0;
7026 v->ext_dependent = NULL_RTX;
7028 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7029 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7031 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7033 while (count > 0)
7035 p = NEXT_INSN (p);
7036 code = GET_CODE (p);
7038 /* If libcall, skip to end of call sequence. */
7039 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7040 p = XEXP (temp, 0);
7042 if (code == INSN
7043 && (set = single_set (p))
7044 && GET_CODE (SET_DEST (set)) == REG
7045 && SET_DEST (set) == dest_reg
7046 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7047 add_val, mult_val, ext_val, 0,
7048 &benefit, VOIDmode)
7049 /* Giv created by equivalent expression. */
7050 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7051 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7052 add_val, mult_val, ext_val, 0,
7053 &benefit, VOIDmode)))
7054 && src_reg == v->src_reg)
7056 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7057 benefit += libcall_benefit (p);
7059 count--;
7060 v->mult_val = *mult_val;
7061 v->add_val = *add_val;
7062 v->benefit += benefit;
7064 else if (code != NOTE)
7066 /* Allow insns that set something other than this giv to a
7067 constant. Such insns are needed on machines which cannot
7068 include long constants and should not disqualify a giv. */
7069 if (code == INSN
7070 && (set = single_set (p))
7071 && SET_DEST (set) != dest_reg
7072 && CONSTANT_P (SET_SRC (set)))
7073 continue;
7075 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7076 return 0;
7080 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7081 *last_consec_insn = p;
7082 return v->benefit;
7085 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7086 represented by G1. If no such expression can be found, or it is clear that
7087 it cannot possibly be a valid address, 0 is returned.
7089 To perform the computation, we note that
7090 G1 = x * v + a and
7091 G2 = y * v + b
7092 where `v' is the biv.
7094 So G2 = (y/x) * G1 + (b - a*y/x).
7096 Note that MULT = y/x.
7098 Update: A and B are now allowed to be additive expressions such that
7099 B contains all variables in A. That is, computing B-A will not require
7100 subtracting variables. */
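/* A quick numeric check of the formula (hypothetical givs): with
   G1 = 4*v + 8 and G2 = 8*v + 20, MULT = 8/4 == 2 and
   G2 == 2*G1 + (20 - 8*2) == 2*G1 + 4.  */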
7102 static rtx
7103 express_from_1 (a, b, mult)
7104 rtx a, b, mult;
7106 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7108 if (mult == const0_rtx)
7109 return b;
7111 /* If MULT is not 1, we cannot handle A with non-constants, since we
7112 would then be required to subtract multiples of the registers in A.
7113 This is theoretically possible, and may even apply to some Fortran
7114 constructs, but it is a lot of work and we do not attempt it here. */
7116 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7117 return NULL_RTX;
7119 /* In general these structures are sorted top to bottom (down the PLUS
7120 chain), but not left to right across the PLUS. If B is a higher
7121 order giv than A, we can strip one level and recurse. If A is higher
7122 order, we'll eventually bail out, but won't know that until the end.
7123 If they are the same, we'll strip one level around this loop. */
7125 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7127 rtx ra, rb, oa, ob, tmp;
7129 ra = XEXP (a, 0), oa = XEXP (a, 1);
7130 if (GET_CODE (ra) == PLUS)
7131 tmp = ra, ra = oa, oa = tmp;
7133 rb = XEXP (b, 0), ob = XEXP (b, 1);
7134 if (GET_CODE (rb) == PLUS)
7135 tmp = rb, rb = ob, ob = tmp;
7137 if (rtx_equal_p (ra, rb))
7138 /* We matched: remove one reg completely. */
7139 a = oa, b = ob;
7140 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7141 /* An alternate match. */
7142 a = oa, b = rb;
7143 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7144 /* An alternate match. */
7145 a = ra, b = ob;
7146 else
7148 /* Indicates an extra register in B. Strip one level from B and
7149 recurse, hoping B was the higher order expression. */
7150 ob = express_from_1 (a, ob, mult);
7151 if (ob == NULL_RTX)
7152 return NULL_RTX;
7153 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7157 /* Here we are at the last level of A, go through the cases hoping to
7158 get rid of everything but a constant. */
7160 if (GET_CODE (a) == PLUS)
7162 rtx ra, oa;
7164 ra = XEXP (a, 0), oa = XEXP (a, 1);
7165 if (rtx_equal_p (oa, b))
7166 oa = ra;
7167 else if (!rtx_equal_p (ra, b))
7168 return NULL_RTX;
7170 if (GET_CODE (oa) != CONST_INT)
7171 return NULL_RTX;
7173 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7175 else if (GET_CODE (a) == CONST_INT)
7177 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7179 else if (CONSTANT_P (a))
7181 enum machine_mode mode_a = GET_MODE (a);
7182 enum machine_mode mode_b = GET_MODE (b);
7183 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7184 return simplify_gen_binary (MINUS, mode, b, a);
7186 else if (GET_CODE (b) == PLUS)
7188 if (rtx_equal_p (a, XEXP (b, 0)))
7189 return XEXP (b, 1);
7190 else if (rtx_equal_p (a, XEXP (b, 1)))
7191 return XEXP (b, 0);
7192 else
7193 return NULL_RTX;
7195 else if (rtx_equal_p (a, b))
7196 return const0_rtx;
7198 return NULL_RTX;
7201 rtx
7202 express_from (g1, g2)
7203 struct induction *g1, *g2;
7205 rtx mult, add;
7207 /* The value that G1 will be multiplied by must be a constant integer. Also,
7208 the only chance we have of getting a valid address is if y/x (see above
7209 for notation) is also an integer. */
7210 if (GET_CODE (g1->mult_val) == CONST_INT
7211 && GET_CODE (g2->mult_val) == CONST_INT)
7213 if (g1->mult_val == const0_rtx
7214 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7215 return NULL_RTX;
7216 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7218 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7219 mult = const1_rtx;
7220 else
7222 /* ??? Find out if the one is a multiple of the other? */
7223 return NULL_RTX;
7226 add = express_from_1 (g1->add_val, g2->add_val, mult);
7227 if (add == NULL_RTX)
7229 /* Failed. If we've got a multiplication factor between G1 and G2,
7230 scale G1's addend and try again. */
7231 if (INTVAL (mult) > 1)
7233 rtx g1_add_val = g1->add_val;
7234 if (GET_CODE (g1_add_val) == MULT
7235 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7237 HOST_WIDE_INT m;
7238 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7239 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7240 XEXP (g1_add_val, 0), GEN_INT (m));
7242 else
7244 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7245 mult);
7248 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7251 if (add == NULL_RTX)
7252 return NULL_RTX;
7254 /* Form simplified final result. */
7255 if (mult == const0_rtx)
7256 return add;
7257 else if (mult == const1_rtx)
7258 mult = g1->dest_reg;
7259 else
7260 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7262 if (add == const0_rtx)
7263 return mult;
7264 else
7266 if (GET_CODE (add) == PLUS
7267 && CONSTANT_P (XEXP (add, 1)))
7269 rtx tem = XEXP (add, 1);
7270 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7271 add = tem;
7274 return gen_rtx_PLUS (g2->mode, mult, add);
7278 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7279 represented by G1. This indicates that G2 should be combined with G1 and
7280 that G2 can use (either directly or via an address expression) a register
7281 used to represent G1. */
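/* For instance (illustrative), if G1 computes 4*v + 4 and G2's address is
   4*v + 8, express_from yields (plus G1_reg (const_int 4)), so G2 can be
   rewritten in terms of the register holding G1 when that sum is a valid
   address.  */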
7283 static rtx
7284 combine_givs_p (g1, g2)
7285 struct induction *g1, *g2;
7287 rtx comb, ret;
7289 /* With the introduction of ext dependent givs, we must take care with modes.
7290 G2 must not use a wider mode than G1. */
7291 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7292 return NULL_RTX;
7294 ret = comb = express_from (g1, g2);
7295 if (comb == NULL_RTX)
7296 return NULL_RTX;
7297 if (g1->mode != g2->mode)
7298 ret = gen_lowpart (g2->mode, comb);
7300 /* If these givs are identical, they can be combined. We use the results
7301 of express_from because the addends are not in a canonical form, so
7302 rtx_equal_p is a weaker test. */
7303 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7304 combination to be the other way round. */
7305 if (comb == g1->dest_reg
7306 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7308 return ret;
7311 /* If G2 can be expressed as a function of G1 and that function is valid
7312 as an address and no more expensive than using a register for G2,
7313 the expression of G2 in terms of G1 can be used. */
7314 if (ret != NULL_RTX
7315 && g2->giv_type == DEST_ADDR
7316 && memory_address_p (GET_MODE (g2->mem), ret)
7317 /* ??? Loses, especially with -fforce-addr, where *g2->location
7318 will always be a register, and so anything more complicated
7319 gets discarded. */
7320 #if 0
7321 #ifdef ADDRESS_COST
7322 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7323 #else
7324 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7325 #endif
7326 #endif
7329 return ret;
7332 return NULL_RTX;
7335 /* Check each extension dependent giv in this class to see if its
7336 root biv is safe from wrapping in the interior mode, which would
7337 make the giv illegal. */
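/* A small worked case (hypothetical loop): a QImode biv starting at 0 and
   incremented by 1 for 10 iterations ends at 10. Since 10 <= 255, zero
   extension is safe (ze_ok); since 10 <= 127, sign extension is safe as
   well (se_ok).  */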
7339 static void
7340 check_ext_dependent_givs (bl, loop_info)
7341 struct iv_class *bl;
7342 struct loop_info *loop_info;
7344 int ze_ok = 0, se_ok = 0, info_ok = 0;
7345 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7346 HOST_WIDE_INT start_val;
7347 unsigned HOST_WIDE_INT u_end_val = 0;
7348 unsigned HOST_WIDE_INT u_start_val = 0;
7349 rtx incr = pc_rtx;
7350 struct induction *v;
7352 /* Make sure the iteration data is available. We must have
7353 constants in order to be certain of no overflow. */
7354 /* ??? An unknown iteration count with an increment of +-1
7355 combined with friendly exit tests against an invariant
7356 value is also amenable to optimization. Not implemented. */
7357 if (loop_info->n_iterations > 0
7358 && bl->initial_value
7359 && GET_CODE (bl->initial_value) == CONST_INT
7360 && (incr = biv_total_increment (bl))
7361 && GET_CODE (incr) == CONST_INT
7362 /* Make sure the host can represent the arithmetic. */
7363 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7365 unsigned HOST_WIDE_INT abs_incr, total_incr;
7366 HOST_WIDE_INT s_end_val;
7367 int neg_incr;
7369 info_ok = 1;
7370 start_val = INTVAL (bl->initial_value);
7371 u_start_val = start_val;
7373 neg_incr = 0, abs_incr = INTVAL (incr);
7374 if (INTVAL (incr) < 0)
7375 neg_incr = 1, abs_incr = -abs_incr;
7376 total_incr = abs_incr * loop_info->n_iterations;
7378 /* Check for host arithmetic overflow. */
7379 if (total_incr / loop_info->n_iterations == abs_incr)
7381 unsigned HOST_WIDE_INT u_max;
7382 HOST_WIDE_INT s_max;
7384 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7385 s_end_val = u_end_val;
7386 u_max = GET_MODE_MASK (biv_mode);
7387 s_max = u_max >> 1;
7389 /* Check that zero extension of the biv is ok. */
7390 if (start_val >= 0
7391 /* Check for host arithmetic overflow. */
7392 && (neg_incr
7393 ? u_end_val < u_start_val
7394 : u_end_val > u_start_val)
7395 /* Check for target arithmetic overflow. */
7396 && (neg_incr
7397 ? 1 /* taken care of with host overflow */
7398 : u_end_val <= u_max))
7400 ze_ok = 1;
7403 /* Check that sign extension of the biv is ok. */
7404 /* ??? While it is true that overflow with signed and pointer
7405 arithmetic is undefined, I fear too many programmers don't
7406 keep this fact in mind -- myself included on occasion.
7407 So we check overflow explicitly here rather than rely on signed overflow being undefined. */
7408 if (start_val >= -s_max - 1
7409 /* Check for host arithmetic overflow. */
7410 && (neg_incr
7411 ? s_end_val < start_val
7412 : s_end_val > start_val)
7413 /* Check for target arithmetic overflow. */
7414 && (neg_incr
7415 ? s_end_val >= -s_max - 1
7416 : s_end_val <= s_max))
7418 se_ok = 1;
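/* A worked example with made-up numbers: for an 8-bit biv with
   start_val = 100, incr = 50 and n_iterations = 4, u_end_val is
   100 + 200 = 300.  That exceeds u_max = 255, so zero extension is
   unsafe, and s_end_val = 300 exceeds s_max = 127, so sign extension
   is unsafe as well.  With start_val = 10 over 2 iterations instead,
   u_end_val = 110 fits both bounds and both extensions are safe.  */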
7423 /* Invalidate givs that fail the tests. */
7424 for (v = bl->giv; v; v = v->next_iv)
7425 if (v->ext_dependent)
7427 enum rtx_code code = GET_CODE (v->ext_dependent);
7428 int ok = 0;
7430 switch (code)
7432 case SIGN_EXTEND:
7433 ok = se_ok;
7434 break;
7435 case ZERO_EXTEND:
7436 ok = ze_ok;
7437 break;
7439 case TRUNCATE:
7440 /* We don't know whether this value is being used as either
7441 signed or unsigned, so to safely truncate we must satisfy
7442 both. The initial check here verifies the BIV itself;
7443 once that is successful we may check its range wrt the
7444 derived GIV. */
7445 if (se_ok && ze_ok)
7447 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7448 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7450 /* We know from the above that both endpoints are nonnegative,
7451 and that there is no wrapping. Verify that both endpoints
7452 are within the (signed) range of the outer mode. */
7453 if (u_start_val <= max && u_end_val <= max)
7454 ok = 1;
7456 break;
7458 default:
7459 abort ();
7462 if (ok)
7464 if (loop_dump_stream)
7466 fprintf (loop_dump_stream,
7467 "Verified ext dependent giv at %d of reg %d\n",
7468 INSN_UID (v->insn), bl->regno);
7471 else
7473 if (loop_dump_stream)
7475 const char *why;
7477 if (info_ok)
7478 why = "biv iteration values overflowed";
7479 else
7481 if (incr == pc_rtx)
7482 incr = biv_total_increment (bl);
7483 if (incr == const1_rtx)
7484 why = "biv iteration info incomplete; incr by 1";
7485 else
7486 why = "biv iteration info incomplete";
7489 fprintf (loop_dump_stream,
7490 "Failed ext dependent giv at %d, %s\n",
7491 INSN_UID (v->insn), why);
7493 v->ignore = 1;
7494 bl->all_reduced = 0;
7499 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7501 rtx
7502 extend_value_for_giv (v, value)
7503 struct induction *v;
7504 rtx value;
7506 rtx ext_dep = v->ext_dependent;
7508 if (! ext_dep)
7509 return value;
7511 /* Recall that check_ext_dependent_givs verified that the known bounds
7512 of a biv did not overflow or wrap with respect to the extension for
7513 the giv. Therefore, constants need no additional adjustment. */
7514 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7515 return value;
7517 /* Otherwise, we must adjust the value to compensate for the
7518 differing modes of the biv and the giv. */
7519 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
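/* For example, with hypothetical rtl: if V->ext_dependent is
   (sign_extend:SI (reg:QI 60)) and VALUE is (reg:QI 70), the result
   is (sign_extend:SI (reg:QI 70)).  */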
7522 struct combine_givs_stats
7524 int giv_number;
7525 int total_benefit;
7528 static int
7529 cmp_combine_givs_stats (xp, yp)
7530 const PTR xp;
7531 const PTR yp;
7533 const struct combine_givs_stats * const x =
7534 (const struct combine_givs_stats *) xp;
7535 const struct combine_givs_stats * const y =
7536 (const struct combine_givs_stats *) yp;
7537 int d;
7538 d = y->total_benefit - x->total_benefit;
7539 /* Stabilize the sort. */
7540 if (!d)
7541 d = x->giv_number - y->giv_number;
7542 return d;
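/* E.g., givs numbered {0, 1, 2} with total benefits {10, 30, 30}
   sort as giv 1, giv 2, giv 0: larger total_benefit first, with the
   original giv number breaking ties so qsort's result is stable.  */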
7545 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7546 any other. If so, point SAME to the giv combined with and set NEW_REG to
7547 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7548 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7550 static void
7551 combine_givs (regs, bl)
7552 struct loop_regs *regs;
7553 struct iv_class *bl;
7555 /* Additional benefit to add for being combined multiple times. */
7556 const int extra_benefit = 3;
7558 struct induction *g1, *g2, **giv_array;
7559 int i, j, k, giv_count;
7560 struct combine_givs_stats *stats;
7561 rtx *can_combine;
7563 /* Count givs, because bl->giv_count is incorrect here. */
7564 giv_count = 0;
7565 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7566 if (!g1->ignore)
7567 giv_count++;
7569 giv_array
7570 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7571 i = 0;
7572 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7573 if (!g1->ignore)
7574 giv_array[i++] = g1;
7576 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7577 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
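/* CAN_COMBINE is a flat GIV_COUNT x GIV_COUNT matrix: the rtx
   expressing giv J in terms of giv I, if that pair is combinable,
   is stored at can_combine[i * giv_count + j]; a zero entry (from
   the xcalloc clearing) means the pair cannot be combined.  */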
7579 for (i = 0; i < giv_count; i++)
7581 int this_benefit;
7582 rtx single_use;
7584 g1 = giv_array[i];
7585 stats[i].giv_number = i;
7587 /* If a DEST_REG GIV is used only once, do not allow it to combine
7588 with anything, for in doing so we will gain nothing that cannot
7589 be had by simply letting the GIV with which we would have combined
7590 be reduced on its own. The lossage shows up in particular with
7591 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7592 be seen elsewhere as well. */
7593 if (g1->giv_type == DEST_REG
7594 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7595 && single_use != const0_rtx)
7596 continue;
7598 this_benefit = g1->benefit;
7599 /* Add an additional weight for zero addends. */
7600 if (g1->no_const_addval)
7601 this_benefit += 1;
7603 for (j = 0; j < giv_count; j++)
7605 rtx this_combine;
7607 g2 = giv_array[j];
7608 if (g1 != g2
7609 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7611 can_combine[i * giv_count + j] = this_combine;
7612 this_benefit += g2->benefit + extra_benefit;
7615 stats[i].total_benefit = this_benefit;
7618 /* Iterate, combining until we can't. */
7619 restart:
7620 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7622 if (loop_dump_stream)
7624 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7625 for (k = 0; k < giv_count; k++)
7627 g1 = giv_array[stats[k].giv_number];
7628 if (!g1->combined_with && !g1->same)
7629 fprintf (loop_dump_stream, " {%d, %d}",
7630 INSN_UID (giv_array[stats[k].giv_number]->insn),
7631 stats[k].total_benefit);
7633 putc ('\n', loop_dump_stream);
7636 for (k = 0; k < giv_count; k++)
7638 int g1_add_benefit = 0;
7640 i = stats[k].giv_number;
7641 g1 = giv_array[i];
7643 /* If it has already been combined, skip. */
7644 if (g1->combined_with || g1->same)
7645 continue;
7647 for (j = 0; j < giv_count; j++)
7649 g2 = giv_array[j];
7650 if (g1 != g2 && can_combine[i * giv_count + j]
7651 /* If it has already been combined, skip. */
7652 && ! g2->same && ! g2->combined_with)
7654 int l;
7656 g2->new_reg = can_combine[i * giv_count + j];
7657 g2->same = g1;
7658 /* For a DEST_ADDR giv, we may now replace the register by a mem
7659 expression. This changes the costs considerably, so add the
7660 compensation. */
7661 if (g2->giv_type == DEST_ADDR)
7662 g2->benefit = (g2->benefit + reg_address_cost
7663 - address_cost (g2->new_reg,
7664 GET_MODE (g2->mem)));
7665 g1->combined_with++;
7666 g1->lifetime += g2->lifetime;
7668 g1_add_benefit += g2->benefit;
7670 /* ??? The new final_[bg]iv_value code does a much better job
7671 of finding replaceable giv's, and hence this code may no
7672 longer be necessary. */
7673 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7674 g1_add_benefit -= copy_cost;
7676 /* To help optimize the next set of combinations, remove
7677 this giv from the benefits of other potential mates. */
7678 for (l = 0; l < giv_count; ++l)
7680 int m = stats[l].giv_number;
7681 if (can_combine[m * giv_count + j])
7682 stats[l].total_benefit -= g2->benefit + extra_benefit;
7685 if (loop_dump_stream)
7686 fprintf (loop_dump_stream,
7687 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7688 INSN_UID (g2->insn), INSN_UID (g1->insn),
7689 g1->benefit, g1_add_benefit, g1->lifetime);
7693 /* To help optimize the next set of combinations, remove
7694 this giv from the benefits of other potential mates. */
7695 if (g1->combined_with)
7697 for (j = 0; j < giv_count; ++j)
7699 int m = stats[j].giv_number;
7700 if (can_combine[m * giv_count + i])
7701 stats[j].total_benefit -= g1->benefit + extra_benefit;
7704 g1->benefit += g1_add_benefit;
7706 /* We've finished with this giv, and everything it touched.
7707 Restart the combination so that proper weights for the
7708 rest of the givs are properly taken into account. */
7709 /* ??? Ideally we would compact the arrays at this point, so
7710 as to not cover old ground. But sanely compacting
7711 can_combine is tricky. */
7712 goto restart;
7716 /* Clean up. */
7717 free (stats);
7718 free (can_combine);
7721 /* Generate sequence for REG = B * M + A. */
7723 static rtx
7724 gen_add_mult (b, m, a, reg)
7725 rtx b; /* initial value of basic induction variable */
7726 rtx m; /* multiplicative constant */
7727 rtx a; /* additive constant */
7728 rtx reg; /* destination register */
7730 rtx seq;
7731 rtx result;
7733 start_sequence ();
7734 /* Use unsigned arithmetic. */
7735 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7736 if (reg != result)
7737 emit_move_insn (reg, result);
7738 seq = gen_sequence ();
7739 end_sequence ();
7741 return seq;
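/* For instance, with B = (reg:SI 100) (hypothetical), M = (const_int 4)
   and A = (const_int 8), the returned sequence computes
   REG = reg 100 * 4 + 8, which expand_mult_add typically emits as a
   shift and an add rather than an actual multiply.  */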
7745 /* Update registers created in insn sequence SEQ. */
7747 static void
7748 loop_regs_update (loop, seq)
7749 const struct loop *loop ATTRIBUTE_UNUSED;
7750 rtx seq;
7752 /* Update register info for alias analysis. */
7754 if (GET_CODE (seq) == SEQUENCE)
7756 int i;
7757 for (i = 0; i < XVECLEN (seq, 0); ++i)
7759 rtx set = single_set (XVECEXP (seq, 0, i));
7760 if (set && GET_CODE (SET_DEST (set)) == REG)
7761 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7764 else
7766 if (GET_CODE (seq) == SET
7767 && GET_CODE (SET_DEST (seq)) == REG)
7768 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7773 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7775 void
7776 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7777 const struct loop *loop;
7778 rtx b; /* initial value of basic induction variable */
7779 rtx m; /* multiplicative constant */
7780 rtx a; /* additive constant */
7781 rtx reg; /* destination register */
7782 basic_block before_bb;
7783 rtx before_insn;
7785 rtx seq;
7787 if (! before_insn)
7789 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7790 return;
7793 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7794 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7796 /* Increase the lifetime of any invariants moved further in code. */
7797 update_reg_last_use (a, before_insn);
7798 update_reg_last_use (b, before_insn);
7799 update_reg_last_use (m, before_insn);
7801 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7803 /* It is possible that the expansion created lots of new registers.
7804 Iterate over the sequence we just created and record them all. */
7805 loop_regs_update (loop, seq);
7809 /* Emit insns in loop pre-header to set REG = B * M + A. */
7811 void
7812 loop_iv_add_mult_sink (loop, b, m, a, reg)
7813 const struct loop *loop;
7814 rtx b; /* initial value of basic induction variable */
7815 rtx m; /* multiplicative constant */
7816 rtx a; /* additive constant */
7817 rtx reg; /* destination register */
7819 rtx seq;
7821 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7822 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7824 /* Increase the lifetime of any invariants moved further in code.
7825 ???? Is this really necessary? */
7826 update_reg_last_use (a, loop->sink);
7827 update_reg_last_use (b, loop->sink);
7828 update_reg_last_use (m, loop->sink);
7830 loop_insn_sink (loop, seq);
7832 /* It is possible that the expansion created lots of new registers.
7833 Iterate over the sequence we just created and record them all. */
7834 loop_regs_update (loop, seq);
7838 /* Emit insns after loop to set REG = B * M + A. */
7840 void
7841 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7842 const struct loop *loop;
7843 rtx b; /* initial value of basic induction variable */
7844 rtx m; /* multiplicative constant */
7845 rtx a; /* additive constant */
7846 rtx reg; /* destination register */
7848 rtx seq;
7850 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7851 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7853 loop_insn_hoist (loop, seq);
7855 /* It is possible that the expansion created lots of new registers.
7856 Iterate over the sequence we just created and record them all. */
7857 loop_regs_update (loop, seq);
7862 /* Similar to gen_add_mult, but compute cost rather than generating
7863 sequence. */
7865 static int
7866 iv_add_mult_cost (b, m, a, reg)
7867 rtx b; /* initial value of basic induction variable */
7868 rtx m; /* multiplicative constant */
7869 rtx a; /* additive constant */
7870 rtx reg; /* destination register */
7872 int cost = 0;
7873 rtx last, result;
7875 start_sequence ();
7876 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7877 if (reg != result)
7878 emit_move_insn (reg, result);
7879 last = get_last_insn ();
7880 while (last)
7882 rtx t = single_set (last);
7883 if (t)
7884 cost += rtx_cost (SET_SRC (t), SET);
7885 last = PREV_INSN (last);
7887 end_sequence ();
7888 return cost;
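/* E.g., if the expansion above emitted a shift and an add, the value
   returned is the sum of the rtx_costs of those two SET sources --
   cheap enough to favor reducing the giv instead of keeping a real
   multiply.  */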
7891 /* Test whether A * B can be computed without
7892 an actual multiply insn. Value is 1 if so. */
7894 static int
7895 product_cheap_p (a, b)
7896 rtx a;
7897 rtx b;
7899 int i;
7900 rtx tmp;
7901 int win = 1;
7903 /* If only one is constant, make it B. */
7904 if (GET_CODE (a) == CONST_INT)
7905 tmp = a, a = b, b = tmp;
7907 /* If the first is constant, both are constant, so we don't need a multiply. */
7908 if (GET_CODE (a) == CONST_INT)
7909 return 1;
7911 /* If the second is not constant, neither is, so we would need a multiply. */
7912 if (GET_CODE (b) != CONST_INT)
7913 return 0;
7915 /* One operand is constant, so we might not need a multiply insn. Generate
7916 the code for the multiply and see if a call, a multiply, or a long
7917 sequence of insns is generated. */
7919 start_sequence ();
7920 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7921 tmp = gen_sequence ();
7922 end_sequence ();
7924 if (GET_CODE (tmp) == SEQUENCE)
7926 if (XVEC (tmp, 0) == 0)
7927 win = 1;
7928 else if (XVECLEN (tmp, 0) > 3)
7929 win = 0;
7930 else
7931 for (i = 0; i < XVECLEN (tmp, 0); i++)
7933 rtx insn = XVECEXP (tmp, 0, i);
7935 if (GET_CODE (insn) != INSN
7936 || (GET_CODE (PATTERN (insn)) == SET
7937 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7938 || (GET_CODE (PATTERN (insn)) == PARALLEL
7939 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7940 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7942 win = 0;
7943 break;
7947 else if (GET_CODE (tmp) == SET
7948 && GET_CODE (SET_SRC (tmp)) == MULT)
7949 win = 0;
7950 else if (GET_CODE (tmp) == PARALLEL
7951 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7952 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7953 win = 0;
7955 return win;
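/* Illustration: a multiply by (const_int 5) usually expands to a
   shift and an add -- two insns, so the product is cheap.  A
   constant requiring a longer shift/add chain, or a real mult insn,
   makes us return 0.  */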
7958 /* Check to see if the loop can be terminated by a "decrement and branch
7959 until zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7960 Also try reversing an increment loop to a decrement loop
7961 to see if the optimization can be performed.
7962 Value is nonzero if optimization was performed. */
7964 /* This is useful even if the architecture doesn't have such an insn,
7965 because it might change a loop which increments from 0 to n into a loop
7966 which decrements from n to 0. A loop that decrements to zero is usually
7967 faster than one that increments from zero. */
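/* For example, when the biv is used only for counting, a loop like
     for (i = 0; i < n; i++) body;
   can be rewritten as
     for (i = n; i > 0; i--) body;
   so that the exit test compares against zero instead of n.  */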
7969 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7970 such as approx_final_value, biv_total_increment, loop_iterations, and
7971 final_[bg]iv_value. */
7973 static int
7974 check_dbra_loop (loop, insn_count)
7975 struct loop *loop;
7976 int insn_count;
7978 struct loop_info *loop_info = LOOP_INFO (loop);
7979 struct loop_regs *regs = LOOP_REGS (loop);
7980 struct loop_ivs *ivs = LOOP_IVS (loop);
7981 struct iv_class *bl;
7982 rtx reg;
7983 rtx jump_label;
7984 rtx final_value;
7985 rtx start_value;
7986 rtx new_add_val;
7987 rtx comparison;
7988 rtx before_comparison;
7989 rtx p;
7990 rtx jump;
7991 rtx first_compare;
7992 int compare_and_branch;
7993 rtx loop_start = loop->start;
7994 rtx loop_end = loop->end;
7996 /* If last insn is a conditional branch, and the insn before tests a
7997 register value, try to optimize it. Otherwise, we can't do anything. */
7999 jump = PREV_INSN (loop_end);
8000 comparison = get_condition_for_loop (loop, jump);
8001 if (comparison == 0)
8002 return 0;
8003 if (!onlyjump_p (jump))
8004 return 0;
8006 /* Try to compute whether the compare/branch at the loop end is one or
8007 two instructions. */
8008 get_condition (jump, &first_compare);
8009 if (first_compare == jump)
8010 compare_and_branch = 1;
8011 else if (first_compare == prev_nonnote_insn (jump))
8012 compare_and_branch = 2;
8013 else
8014 return 0;
8017 /* If more than one condition is present to control the loop, then
8018 do not proceed, as this function does not know how to rewrite
8019 loop tests with more than one condition.
8021 Look backwards from the first insn in the last comparison
8022 sequence and see if we've got another comparison sequence. */
8024 rtx jump1;
8025 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8026 if (GET_CODE (jump1) == JUMP_INSN)
8027 return 0;
8030 /* Check all of the bivs to see if the compare uses one of them.
8031 Skip bivs set more than once because we can't guarantee that
8032 it will be zero on the last iteration. Also skip if the biv is
8033 used between its update and the test insn. */
8035 for (bl = ivs->list; bl; bl = bl->next)
8037 if (bl->biv_count == 1
8038 && ! bl->biv->maybe_multiple
8039 && bl->biv->dest_reg == XEXP (comparison, 0)
8040 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8041 first_compare))
8042 break;
8045 if (! bl)
8046 return 0;
8048 /* Look for the case where the basic induction variable is always
8049 nonnegative, and equals zero on the last iteration.
8050 In this case, add a reg_note REG_NONNEG, which allows the
8051 m68k DBRA instruction to be used. */
8053 if (((GET_CODE (comparison) == GT
8054 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8055 && INTVAL (XEXP (comparison, 1)) == -1)
8056 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8057 && GET_CODE (bl->biv->add_val) == CONST_INT
8058 && INTVAL (bl->biv->add_val) < 0)
8060 /* The initial value must be greater than 0, and init_val % -dec_value
8061 must be 0, so that the biv equals exactly zero on
8062 the last iteration. */
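/* Example: with initial value 12 and add_val -4, 12 % 4 == 0 and
   the biv takes the values 12, 8, 4, 0 -- exactly zero at loop
   exit, so the REG_NONNEG note is safe.  */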
8064 if (GET_CODE (bl->initial_value) == CONST_INT
8065 && INTVAL (bl->initial_value) > 0
8066 && (INTVAL (bl->initial_value)
8067 % (-INTVAL (bl->biv->add_val))) == 0)
8069 /* register always nonnegative, add REG_NOTE to branch */
8070 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8071 REG_NOTES (jump)
8072 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8073 REG_NOTES (jump));
8074 bl->nonneg = 1;
8076 return 1;
8079 /* If the decrement is 1 and the value was tested as >= 0 before
8080 the loop, then we can safely optimize. */
8081 for (p = loop_start; p; p = PREV_INSN (p))
8083 if (GET_CODE (p) == CODE_LABEL)
8084 break;
8085 if (GET_CODE (p) != JUMP_INSN)
8086 continue;
8088 before_comparison = get_condition_for_loop (loop, p);
8089 if (before_comparison
8090 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8091 && GET_CODE (before_comparison) == LT
8092 && XEXP (before_comparison, 1) == const0_rtx
8093 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8094 && INTVAL (bl->biv->add_val) == -1)
8096 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8097 REG_NOTES (jump)
8098 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8099 REG_NOTES (jump));
8100 bl->nonneg = 1;
8102 return 1;
8106 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8107 && INTVAL (bl->biv->add_val) > 0)
8109 /* Try to change inc to dec, so can apply above optimization. */
8110 /* Can do this if:
8111 all registers modified are induction variables or invariant,
8112 all memory references have non-overlapping addresses
8113 (obviously true if only one write)
8114 allow 2 insns for the compare/jump at the end of the loop. */
8115 /* Also, we must avoid any instructions which use both the reversed
8116 biv and another biv. Such instructions will fail if the loop is
8117 reversed. We meet this condition by requiring that either
8118 no_use_except_counting is true, or else that there is only
8119 one biv. */
8120 int num_nonfixed_reads = 0;
8121 /* 1 if the iteration var is used only to count iterations. */
8122 int no_use_except_counting = 0;
8123 /* 1 if the loop has no memory store, or it has a single memory store
8124 which is reversible. */
8125 int reversible_mem_store = 1;
8127 if (bl->giv_count == 0
8128 && !loop->exit_count
8129 && !loop_info->has_multiple_exit_targets)
8131 rtx bivreg = regno_reg_rtx[bl->regno];
8132 struct iv_class *blt;
8134 /* If there are no givs for this biv, and the only exit is the
8135 fall through at the end of the loop, then
8136 see if perhaps there are no uses except to count. */
8137 no_use_except_counting = 1;
8138 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8139 if (INSN_P (p))
8141 rtx set = single_set (p);
8143 if (set && GET_CODE (SET_DEST (set)) == REG
8144 && REGNO (SET_DEST (set)) == bl->regno)
8145 /* An insn that sets the biv is okay. */
8147 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8148 || p == prev_nonnote_insn (loop_end))
8149 && reg_mentioned_p (bivreg, PATTERN (p)))
8151 /* If either of these insns uses the biv and sets a pseudo
8152 that has more than one usage, then the biv has uses
8153 other than counting since it's used to derive a value
8154 that is used more than one time. */
8155 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8156 regs);
8157 if (regs->multiple_uses)
8159 no_use_except_counting = 0;
8160 break;
8163 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8165 no_use_except_counting = 0;
8166 break;
8170 /* A biv has uses besides counting if it is used to set
8171 another biv. */
8172 for (blt = ivs->list; blt; blt = blt->next)
8173 if (blt->init_set
8174 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8176 no_use_except_counting = 0;
8177 break;
8181 if (no_use_except_counting)
8182 /* No need to worry about MEMs. */
8184 else if (loop_info->num_mem_sets <= 1)
8186 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8187 if (INSN_P (p))
8188 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8190 /* If the loop has a single store, and the destination address is
8191 invariant, then we can't reverse the loop, because this address
8192 might then have the wrong value at loop exit.
8193 This would work if the source was invariant also, however, in that
8194 case, the insn should have been moved out of the loop. */
8196 if (loop_info->num_mem_sets == 1)
8198 struct induction *v;
8200 /* If we could prove that each of the memory locations
8201 written to was different, then we could reverse the
8202 store -- but we don't presently have any way of
8203 knowing that. */
8204 reversible_mem_store = 0;
8206 /* If the store depends on a register that is set after the
8207 store, it depends on the initial value, and is thus not
8208 reversible. */
8209 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8211 if (v->giv_type == DEST_REG
8212 && reg_mentioned_p (v->dest_reg,
8213 PATTERN (loop_info->first_loop_store_insn))
8214 && loop_insn_first_p (loop_info->first_loop_store_insn,
8215 v->insn))
8216 reversible_mem_store = 0;
8220 else
8221 return 0;
8223 /* This code only acts for innermost loops. Also it simplifies
8224 the memory address check by only reversing loops with
8225 zero or one memory access.
8226 Two memory accesses could involve parts of the same array,
8227 and that can't be reversed.
8228 If the biv is used only for counting, then we don't need to worry
8229 about all these things. */
8231 if ((num_nonfixed_reads <= 1
8232 && ! loop_info->has_nonconst_call
8233 && ! loop_info->has_prefetch
8234 && ! loop_info->has_volatile
8235 && reversible_mem_store
8236 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8237 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8238 && (bl == ivs->list && bl->next == 0))
8239 || (no_use_except_counting && ! loop_info->has_prefetch))
8241 rtx tem;
8243 /* Loop can be reversed. */
8244 if (loop_dump_stream)
8245 fprintf (loop_dump_stream, "Can reverse loop\n");
8247 /* Now check other conditions:
8249 The increment must be a constant, as must the initial value,
8250 and the comparison code must be LT.
8252 This test can probably be improved since +/- 1 in the constant
8253 can be obtained by changing LT to LE and vice versa; this is
8254 confusing. */
8256 if (comparison
8257 /* for constants, LE gets turned into LT */
8258 && (GET_CODE (comparison) == LT
8259 || (GET_CODE (comparison) == LE
8260 && no_use_except_counting)))
8262 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8263 rtx initial_value, comparison_value;
8264 int nonneg = 0;
8265 enum rtx_code cmp_code;
8266 int comparison_const_width;
8267 unsigned HOST_WIDE_INT comparison_sign_mask;
8269 add_val = INTVAL (bl->biv->add_val);
8270 comparison_value = XEXP (comparison, 1);
8271 if (GET_MODE (comparison_value) == VOIDmode)
8272 comparison_const_width
8273 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8274 else
8275 comparison_const_width
8276 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8277 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8278 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8279 comparison_sign_mask
8280 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8282 /* If the comparison value is not a loop invariant, then we
8283 can not reverse this loop.
8285 ??? If the insns which initialize the comparison value as
8286 a whole compute an invariant result, then we could move
8287 them out of the loop and proceed with loop reversal. */
8288 if (! loop_invariant_p (loop, comparison_value))
8289 return 0;
8291 if (GET_CODE (comparison_value) == CONST_INT)
8292 comparison_val = INTVAL (comparison_value);
8293 initial_value = bl->initial_value;
8295 /* Normalize the initial value if it is an integer and
8296 has no other use except as a counter. This will allow
8297 a few more loops to be reversed. */
8298 if (no_use_except_counting
8299 && GET_CODE (comparison_value) == CONST_INT
8300 && GET_CODE (initial_value) == CONST_INT)
8302 comparison_val = comparison_val - INTVAL (bl->initial_value);
8303 /* The code below requires comparison_val to be a multiple
8304 of add_val in order to do the loop reversal, so
8305 round up comparison_val to a multiple of add_val.
8306 Since comparison_value is constant, we know that the
8307 current comparison code is LT. */
8308 comparison_val = comparison_val + add_val - 1;
8309 comparison_val
8310 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8311 /* We postpone overflow checks for COMPARISON_VAL here;
8312 even if there is an overflow, we might still be able to
8313 reverse the loop, if converting the loop exit test to
8314 NE is possible. */
8315 initial_value = const0_rtx;
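/* Normalization example: "for (i = 5; i < 17; i += 4)" iterates
   with i = 5, 9, 13.  comparison_val becomes 17 - 5 = 12, already a
   multiple of add_val 4, so the rounding changes nothing, and the
   loop is treated as "for (i = 0; i < 12; i += 4)" -- the same
   three iterations.  */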
8318 /* First check if we can do a vanilla loop reversal. */
8319 if (initial_value == const0_rtx
8320 /* If we have a decrement_and_branch_on_count,
8321 prefer the NE test, since this will allow that
8322 instruction to be generated. Note that we must
8323 use a vanilla loop reversal if the biv is used to
8324 calculate a giv or has a non-counting use. */
8325 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8326 && defined (HAVE_decrement_and_branch_on_count)
8327 && (! (add_val == 1 && loop->vtop
8328 && (bl->biv_count == 0
8329 || no_use_except_counting)))
8330 #endif
8331 && GET_CODE (comparison_value) == CONST_INT
8332 /* Now do postponed overflow checks on COMPARISON_VAL. */
8333 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8334 & comparison_sign_mask))
8336 /* Register will always be nonnegative, with value
8337 0 on last iteration */
8338 add_adjust = add_val;
8339 nonneg = 1;
8340 cmp_code = GE;
8342 else if (add_val == 1 && loop->vtop
8343 && (bl->biv_count == 0
8344 || no_use_except_counting))
8346 add_adjust = 0;
8347 cmp_code = NE;
8349 else
8350 return 0;
8352 if (GET_CODE (comparison) == LE)
8353 add_adjust -= add_val;
8355 /* If the initial value is not zero, or if the comparison
8356 value is not an exact multiple of the increment, then we
8357 can not reverse this loop. */
8358 if (initial_value == const0_rtx
8359 && GET_CODE (comparison_value) == CONST_INT)
8361 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8362 return 0;
8364 else
8366 if (! no_use_except_counting || add_val != 1)
8367 return 0;
8370 final_value = comparison_value;
8372 /* Reset these in case we normalized the initial value
8373 and comparison value above. */
8374 if (GET_CODE (comparison_value) == CONST_INT
8375 && GET_CODE (initial_value) == CONST_INT)
8377 comparison_value = GEN_INT (comparison_val);
8378 final_value
8379 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8381 bl->initial_value = initial_value;
8383 /* Save some info needed to produce the new insns. */
8384 reg = bl->biv->dest_reg;
8385 jump_label = condjump_label (PREV_INSN (loop_end));
8386 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8388 /* Set start_value; if this is not a CONST_INT, we need
8389 to generate a SUB.
8390 Initialize biv to start_value before loop start.
8391 The old initializing insn will be deleted as a
8392 dead store by flow.c. */
8393 if (initial_value == const0_rtx
8394 && GET_CODE (comparison_value) == CONST_INT)
8396 start_value = GEN_INT (comparison_val - add_adjust);
8397 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
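/* Continuing the normalization example: with comparison_val 12 and
   add_adjust 4, start_value is 8, and the reversed biv steps
   through 8, 4, 0 under the new exit test REG >= 0.  */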
8399 else if (GET_CODE (initial_value) == CONST_INT)
8401 enum machine_mode mode = GET_MODE (reg);
8402 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8403 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8405 if (add_insn == 0)
8406 return 0;
8408 start_value
8409 = gen_rtx_PLUS (mode, comparison_value, offset);
8410 loop_insn_hoist (loop, add_insn);
8411 if (GET_CODE (comparison) == LE)
8412 final_value = gen_rtx_PLUS (mode, comparison_value,
8413 GEN_INT (add_val));
8415 else if (! add_adjust)
8417 enum machine_mode mode = GET_MODE (reg);
8418 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8419 initial_value);
8421 if (sub_insn == 0)
8422 return 0;
8423 start_value
8424 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8425 loop_insn_hoist (loop, sub_insn);
8427 else
8428 /* We could handle the other cases too, but it'll be
8429 better to have a testcase first. */
8430 return 0;
8432 /* We may not have a single insn which can increment a reg, so
8433 create a sequence to hold all the insns from expand_inc. */
8434 start_sequence ();
8435 expand_inc (reg, new_add_val);
8436 tem = gen_sequence ();
8437 end_sequence ();
8439 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8440 delete_insn (bl->biv->insn);
8442 /* Update biv info to reflect its new status. */
8443 bl->biv->insn = p;
8444 bl->initial_value = start_value;
8445 bl->biv->add_val = new_add_val;
8447 /* Update loop info. */
8448 loop_info->initial_value = reg;
8449 loop_info->initial_equiv_value = reg;
8450 loop_info->final_value = const0_rtx;
8451 loop_info->final_equiv_value = const0_rtx;
8452 loop_info->comparison_value = const0_rtx;
8453 loop_info->comparison_code = cmp_code;
8454 loop_info->increment = new_add_val;
8456 /* Inc LABEL_NUSES so that delete_insn will
8457 not delete the label. */
8458 LABEL_NUSES (XEXP (jump_label, 0))++;
8460 /* Emit an insn after the end of the loop to set the biv's
8461 proper exit value if it is used anywhere outside the loop. */
8462 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8463 || ! bl->init_insn
8464 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8465 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8467 /* Delete compare/branch at end of loop. */
8468 delete_related_insns (PREV_INSN (loop_end));
8469 if (compare_and_branch == 2)
8470 delete_related_insns (first_compare);
8472 /* Add new compare/branch insn at end of loop. */
8473 start_sequence ();
8474 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8475 GET_MODE (reg), 0,
8476 XEXP (jump_label, 0));
8477 tem = gen_sequence ();
8478 end_sequence ();
8479 emit_jump_insn_before (tem, loop_end);
8481 for (tem = PREV_INSN (loop_end);
8482 tem && GET_CODE (tem) != JUMP_INSN;
8483 tem = PREV_INSN (tem))
8486 if (tem)
8487 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8489 if (nonneg)
8491 if (tem)
8493 /* Increment of LABEL_NUSES done above. */
8494 /* Register is now always nonnegative,
8495 so add REG_NONNEG note to the branch. */
8496 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8497 REG_NOTES (tem));
8499 bl->nonneg = 1;
8502 /* No insn may reference both the reversed and another biv or it
8503 will fail (see comment near the top of the loop reversal
8504 code).
8505 Earlier on, we have verified that the biv has no use except
8506 counting, or it is the only biv in this function.
8507 However, the code that computes no_use_except_counting does
8508 not verify reg notes. It's possible to have an insn that
8509 references another biv, and has a REG_EQUAL note with an
8510 expression based on the reversed biv. To avoid this case,
8511 remove all REG_EQUAL notes based on the reversed biv
8512 here. */
8513 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8514 if (INSN_P (p))
8516 rtx *pnote;
8517 rtx set = single_set (p);
8518 /* If this is a set of a GIV based on the reversed biv, any
8519 REG_EQUAL notes should still be correct. */
8520 if (! set
8521 || GET_CODE (SET_DEST (set)) != REG
8522 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8523 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8524 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8525 for (pnote = &REG_NOTES (p); *pnote;)
8527 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8528 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8529 XEXP (*pnote, 0)))
8530 *pnote = XEXP (*pnote, 1);
8531 else
8532 pnote = &XEXP (*pnote, 1);
8536 /* Mark that this biv has been reversed. Each giv which depends
8537 on this biv, and which is also live past the end of the loop
8538 will have to be fixed up. */
8540 bl->reversed = 1;
8542 if (loop_dump_stream)
8544 fprintf (loop_dump_stream, "Reversed loop");
8545 if (bl->nonneg)
8546 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8547 else
8548 fprintf (loop_dump_stream, "\n");
8551 return 1;
8556 return 0;
8559 /* Verify whether the biv BL appears to be eliminable,
8560 based on the insns in the loop that refer to it.
8562 If ELIMINATE_P is non-zero, actually do the elimination.
8564 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8565 determine whether invariant insns should be placed inside or at the
8566 start of the loop. */
8568 static int
8569 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8570 const struct loop *loop;
8571 struct iv_class *bl;
8572 int eliminate_p;
8573 int threshold, insn_count;
8575 struct loop_ivs *ivs = LOOP_IVS (loop);
8576 rtx reg = bl->biv->dest_reg;
8577 rtx p;
8579 /* Scan all insns in the loop, stopping if we find one that uses the
8580 biv in a way that we cannot eliminate. */
8582 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8584 enum rtx_code code = GET_CODE (p);
8585 basic_block where_bb = 0;
8586 rtx where_insn = threshold >= insn_count ? 0 : p;
8588 /* If this is a libcall that sets a giv, skip ahead to its end. */
8589 if (GET_RTX_CLASS (code) == 'i')
8591 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8593 if (note)
8595 rtx last = XEXP (note, 0);
8596 rtx set = single_set (last);
8598 if (set && GET_CODE (SET_DEST (set)) == REG)
8600 unsigned int regno = REGNO (SET_DEST (set));
8602 if (regno < ivs->n_regs
8603 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8604 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8605 p = last;
8609 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8610 && reg_mentioned_p (reg, PATTERN (p))
8611 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8612 eliminate_p, where_bb, where_insn))
8614 if (loop_dump_stream)
8615 fprintf (loop_dump_stream,
8616 "Cannot eliminate biv %d: biv used in insn %d.\n",
8617 bl->regno, INSN_UID (p));
8618 break;
8622 if (p == loop->end)
8624 if (loop_dump_stream)
8625 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8626 bl->regno, eliminate_p ? "was" : "can be");
8627 return 1;
8630 return 0;
8633 /* INSN and REFERENCE are instructions in the same insn chain.
8634 Return non-zero if INSN is first. */
8636 int
8637 loop_insn_first_p (insn, reference)
8638 rtx insn, reference;
8640 rtx p, q;
8642 for (p = insn, q = reference;;)
8644 /* Test for "not first" before testing for "first", so that
8645 INSN == REFERENCE yields not first. */
8646 if (q == insn || ! p)
8647 return 0;
8648 if (p == reference || ! q)
8649 return 1;
8651 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8652 previous insn, hence the <= comparison below does not work if
8653 P is a note. */
8654 if (INSN_UID (p) < max_uid_for_loop
8655 && INSN_UID (q) < max_uid_for_loop
8656 && GET_CODE (p) != NOTE)
8657 return INSN_LUID (p) <= INSN_LUID (q);
8659 if (INSN_UID (p) >= max_uid_for_loop
8660 || GET_CODE (p) == NOTE)
8661 p = NEXT_INSN (p);
8662 if (INSN_UID (q) >= max_uid_for_loop)
8663 q = NEXT_INSN (q);
8667 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8668 the offset that we have to take into account due to auto-increment /
8669 giv derivation is zero. */
8670 static int
8671 biv_elimination_giv_has_0_offset (biv, giv, insn)
8672 struct induction *biv, *giv;
8673 rtx insn;
8675 /* If the giv V had the auto-inc address optimization applied
8676 to it, and INSN occurs between the giv insn and the biv
8677 insn, then we'd have to adjust the value used here.
8678 This is rare, so we don't bother to make this possible. */
8679 if (giv->auto_inc_opt
8680 && ((loop_insn_first_p (giv->insn, insn)
8681 && loop_insn_first_p (insn, biv->insn))
8682 || (loop_insn_first_p (biv->insn, insn)
8683 && loop_insn_first_p (insn, giv->insn))))
8684 return 0;
8686 return 1;
8689 /* If BL appears in X (part of the pattern of INSN), see if we can
8690 eliminate its use. If so, return 1. If not, return 0.
8692 If BIV does not appear in X, return 1.
8694 If ELIMINATE_P is non-zero, actually do the elimination.
8695 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8696 Depending on how many items have been moved out of the loop, it
8697 will either be before INSN (when WHERE_INSN is non-zero) or at the
8698 start of the loop (when WHERE_INSN is zero). */
8700 static int
8701 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8702 const struct loop *loop;
8703 rtx x, insn;
8704 struct iv_class *bl;
8705 int eliminate_p;
8706 basic_block where_bb;
8707 rtx where_insn;
8709 enum rtx_code code = GET_CODE (x);
8710 rtx reg = bl->biv->dest_reg;
8711 enum machine_mode mode = GET_MODE (reg);
8712 struct induction *v;
8713 rtx arg, tem;
8714 #ifdef HAVE_cc0
8715 rtx new;
8716 #endif
8717 int arg_operand;
8718 const char *fmt;
8719 int i, j;
8721 switch (code)
8723 case REG:
8724 /* If we haven't already been able to do something with this BIV,
8725 we can't eliminate it. */
8726 if (x == reg)
8727 return 0;
8728 return 1;
8730 case SET:
8731 /* If this sets the BIV, it is not a problem. */
8732 if (SET_DEST (x) == reg)
8733 return 1;
8735 /* If this is an insn that defines a giv, it is also ok because
8736 it will go away when the giv is reduced. */
8737 for (v = bl->giv; v; v = v->next_iv)
8738 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8739 return 1;
8741 #ifdef HAVE_cc0
8742 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8744 /* Can replace with any giv that was reduced and
8745 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8746 Require a constant for MULT_VAL, so we know it's nonzero.
8747 ??? We disable this optimization to avoid potential
8748 overflows. */
8750 for (v = bl->giv; v; v = v->next_iv)
8751 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8752 && v->add_val == const0_rtx
8753 && ! v->ignore && ! v->maybe_dead && v->always_computable
8754 && v->mode == mode
8755 && 0)
8757 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8758 continue;
8760 if (! eliminate_p)
8761 return 1;
8763 /* If the giv has the opposite direction of change,
8764 then reverse the comparison. */
8765 if (INTVAL (v->mult_val) < 0)
8766 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8767 const0_rtx, v->new_reg);
8768 else
8769 new = v->new_reg;
8771 /* We can probably test that giv's reduced reg. */
8772 if (validate_change (insn, &SET_SRC (x), new, 0))
8773 return 1;
8776 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8777 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8778 Require a constant for MULT_VAL, so we know it's nonzero.
8779 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8780 overflow problem. */
8782 for (v = bl->giv; v; v = v->next_iv)
8783 if (GET_CODE (v->mult_val) == CONST_INT
8784 && v->mult_val != const0_rtx
8785 && ! v->ignore && ! v->maybe_dead && v->always_computable
8786 && v->mode == mode
8787 && (GET_CODE (v->add_val) == SYMBOL_REF
8788 || GET_CODE (v->add_val) == LABEL_REF
8789 || GET_CODE (v->add_val) == CONST
8790 || (GET_CODE (v->add_val) == REG
8791 && REG_POINTER (v->add_val))))
8793 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8794 continue;
8796 if (! eliminate_p)
8797 return 1;
8799 /* If the giv has the opposite direction of change,
8800 then reverse the comparison. */
8801 if (INTVAL (v->mult_val) < 0)
8802 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8803 v->new_reg);
8804 else
8805 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8806 copy_rtx (v->add_val));
8808 /* Replace biv with the giv's reduced register. */
8809 update_reg_last_use (v->add_val, insn);
8810 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8811 return 1;
8813 /* Insn doesn't support that constant or invariant. Copy it
8814 into a register (it will be a loop invariant.) */
8815 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8817 loop_insn_emit_before (loop, 0, where_insn,
8818 gen_move_insn (tem,
8819 copy_rtx (v->add_val)));
8821 /* Substitute the new register for its invariant value in
8822 the compare expression. */
8823 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8824 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8825 return 1;
8828 #endif
8829 break;
8831 case COMPARE:
8832 case EQ: case NE:
8833 case GT: case GE: case GTU: case GEU:
8834 case LT: case LE: case LTU: case LEU:
8835 /* See if either argument is the biv. */
8836 if (XEXP (x, 0) == reg)
8837 arg = XEXP (x, 1), arg_operand = 1;
8838 else if (XEXP (x, 1) == reg)
8839 arg = XEXP (x, 0), arg_operand = 0;
8840 else
8841 break;
8843 if (CONSTANT_P (arg))
8845 /* First try to replace with any giv that has constant positive
8846 mult_val and constant add_val. We might be able to support
8847 negative mult_val, but it seems complex to do it in general. */
8849 for (v = bl->giv; v; v = v->next_iv)
8850 if (GET_CODE (v->mult_val) == CONST_INT
8851 && INTVAL (v->mult_val) > 0
8852 && (GET_CODE (v->add_val) == SYMBOL_REF
8853 || GET_CODE (v->add_val) == LABEL_REF
8854 || GET_CODE (v->add_val) == CONST
8855 || (GET_CODE (v->add_val) == REG
8856 && REG_POINTER (v->add_val)))
8857 && ! v->ignore && ! v->maybe_dead && v->always_computable
8858 && v->mode == mode)
8860 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8861 continue;
8863 /* Don't eliminate if the linear combination that makes up
8864 the giv overflows when it is applied to ARG. */
8865 if (GET_CODE (arg) == CONST_INT)
8867 rtx add_val;
8869 if (GET_CODE (v->add_val) == CONST_INT)
8870 add_val = v->add_val;
8871 else
8872 add_val = const0_rtx;
8874 if (const_mult_add_overflow_p (arg, v->mult_val,
8875 add_val, mode, 1))
8876 continue;
8879 if (! eliminate_p)
8880 return 1;
8882 /* Replace biv with the giv's reduced reg. */
8883 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8885 /* If all constants are actually constant integers and
8886 the derived constant can be directly placed in the COMPARE,
8887 do so. */
8888 if (GET_CODE (arg) == CONST_INT
8889 && GET_CODE (v->add_val) == CONST_INT)
8891 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8892 v->add_val, mode, 1);
8894 else
8896 /* Otherwise, load it into a register. */
8897 tem = gen_reg_rtx (mode);
8898 loop_iv_add_mult_emit_before (loop, arg,
8899 v->mult_val, v->add_val,
8900 tem, where_bb, where_insn);
8903 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8905 if (apply_change_group ())
8906 return 1;
8909 /* Look for giv with positive constant mult_val and nonconst add_val.
8910 Insert insns to calculate new compare value.
8911 ??? Turn this off due to possible overflow. */
8913 for (v = bl->giv; v; v = v->next_iv)
8914 if (GET_CODE (v->mult_val) == CONST_INT
8915 && INTVAL (v->mult_val) > 0
8916 && ! v->ignore && ! v->maybe_dead && v->always_computable
8917 && v->mode == mode
8918 && 0)
8920 rtx tem;
8922 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8923 continue;
8925 if (! eliminate_p)
8926 return 1;
8928 tem = gen_reg_rtx (mode);
8930 /* Replace biv with giv's reduced register. */
8931 validate_change (insn, &XEXP (x, 1 - arg_operand),
8932 v->new_reg, 1);
8934 /* Compute value to compare against. */
8935 loop_iv_add_mult_emit_before (loop, arg,
8936 v->mult_val, v->add_val,
8937 tem, where_bb, where_insn);
8938 /* Use it in this insn. */
8939 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8940 if (apply_change_group ())
8941 return 1;
8944 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8946 if (loop_invariant_p (loop, arg) == 1)
8948 /* Look for giv with constant positive mult_val and nonconst
8949 add_val. Insert insns to compute new compare value.
8950 ??? Turn this off due to possible overflow. */
8952 for (v = bl->giv; v; v = v->next_iv)
8953 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8954 && ! v->ignore && ! v->maybe_dead && v->always_computable
8955 && v->mode == mode
8956 && 0)
8958 rtx tem;
8960 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8961 continue;
8963 if (! eliminate_p)
8964 return 1;
8966 tem = gen_reg_rtx (mode);
8968 /* Replace biv with giv's reduced register. */
8969 validate_change (insn, &XEXP (x, 1 - arg_operand),
8970 v->new_reg, 1);
8972 /* Compute value to compare against. */
8973 loop_iv_add_mult_emit_before (loop, arg,
8974 v->mult_val, v->add_val,
8975 tem, where_bb, where_insn);
8976 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8977 if (apply_change_group ())
8978 return 1;
8982 /* This code has problems. Basically, you can't know when
8983 seeing if we will eliminate BL, whether a particular giv
8984 of ARG will be reduced. If it isn't going to be reduced,
8985 we can't eliminate BL. We can try forcing it to be reduced,
8986 but that can generate poor code.
8988 The problem is that the benefit of reducing TV, below, should
8989 be increased if BL can actually be eliminated, but this means
8990 we might have to do a topological sort of the order in which
8991 we try to process biv. It doesn't seem worthwhile to do
8992 this sort of thing now. */
8994 #if 0
8995 /* Otherwise the reg compared with had better be a biv. */
8996 if (GET_CODE (arg) != REG
8997 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8998 return 0;
9000 /* Look for a pair of givs, one for each biv,
9001 with identical coefficients. */
9002 for (v = bl->giv; v; v = v->next_iv)
9004 struct induction *tv;
9006 if (v->ignore || v->maybe_dead || v->mode != mode)
9007 continue;
9009 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9010 tv = tv->next_iv)
9011 if (! tv->ignore && ! tv->maybe_dead
9012 && rtx_equal_p (tv->mult_val, v->mult_val)
9013 && rtx_equal_p (tv->add_val, v->add_val)
9014 && tv->mode == mode)
9016 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9017 continue;
9019 if (! eliminate_p)
9020 return 1;
9022 /* Replace biv with its giv's reduced reg. */
9023 XEXP (x, 1 - arg_operand) = v->new_reg;
9024 /* Replace other operand with the other giv's
9025 reduced reg. */
9026 XEXP (x, arg_operand) = tv->new_reg;
9027 return 1;
9030 #endif
9033 /* If we get here, the biv can't be eliminated. */
9034 return 0;
9036 case MEM:
9037 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9038 biv is used in it, since it will be replaced. */
9039 for (v = bl->giv; v; v = v->next_iv)
9040 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9041 return 1;
9042 break;
9044 default:
9045 break;
9048 /* See if any subexpression fails elimination. */
9049 fmt = GET_RTX_FORMAT (code);
9050 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9052 switch (fmt[i])
9054 case 'e':
9055 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9056 eliminate_p, where_bb, where_insn))
9057 return 0;
9058 break;
9060 case 'E':
9061 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9062 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9063 eliminate_p, where_bb, where_insn))
9064 return 0;
9065 break;
9069 return 1;
9072 /* Return nonzero if the last use of REG
9073 is in an insn following INSN in the same basic block. */
9075 static int
9076 last_use_this_basic_block (reg, insn)
9077 rtx reg;
9078 rtx insn;
9080 rtx n;
9081 for (n = insn;
9082 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9083 n = NEXT_INSN (n))
9085 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9086 return 1;
9088 return 0;
9091 /* Called via `note_stores' to record the initial value of a biv. Here we
9092 just record the location of the set and process it later. */
9094 static void
9095 record_initial (dest, set, data)
9096 rtx dest;
9097 rtx set;
9098 void *data ATTRIBUTE_UNUSED;
9100 struct loop_ivs *ivs = (struct loop_ivs *) data;
9101 struct iv_class *bl;
9103 if (GET_CODE (dest) != REG
9104 || REGNO (dest) >= ivs->n_regs
9105 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9106 return;
9108 bl = REG_IV_CLASS (ivs, REGNO (dest));
9110 /* If this is the first set found, record it. */
9111 if (bl->init_insn == 0)
9113 bl->init_insn = note_insn;
9114 bl->init_set = set;
9118 /* If any of the registers in X are "old" and currently have a last use earlier
9119 than INSN, update them to have a last use of INSN. Their actual last use
9120 will be the previous insn but it will not have a valid uid_luid so we can't
9121 use it. X must be a source expression only. */
9123 static void
9124 update_reg_last_use (x, insn)
9125 rtx x;
9126 rtx insn;
9128 /* Check for the case where INSN does not have a valid luid. In this case,
9129 there is no need to modify the regno_last_uid, as this can only happen
9130 when code is inserted after the loop_end to set a pseudo's final value,
9131 and hence this insn will never be the last use of x.
9132 ???? This comment is not correct. See for example loop_givs_reduce.
9133 This may insert an insn before another new insn. */
9134 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9135 && INSN_UID (insn) < max_uid_for_loop
9136 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9138 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9140 else
9142 int i, j;
9143 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9144 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9146 if (fmt[i] == 'e')
9147 update_reg_last_use (XEXP (x, i), insn);
9148 else if (fmt[i] == 'E')
9149 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9150 update_reg_last_use (XVECEXP (x, i, j), insn);
9155 /* Given an insn INSN and condition COND, return the condition in a
9156 canonical form to simplify testing by callers. Specifically:
9158 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9159 (2) Both operands will be machine operands; (cc0) will have been replaced.
9160 (3) If an operand is a constant, it will be the second operand.
9161 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9162 for GE, GEU, and LEU.
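(For example, (le (reg) (const_int 4)) becomes (lt (reg) (const_int 5))
by rule (4), and (gt (const_int 7) (reg)) is swapped by rule (3) into
(lt (reg) (const_int 7)).)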
9164 If the condition cannot be understood, or is an inequality floating-point
9165 comparison which needs to be reversed, 0 will be returned.
9167 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9169 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9170 insn used in locating the condition was found. If a replacement test
9171 of the condition is desired, it should be placed in front of that
9172 insn and we will be sure that the inputs are still valid.
9174 If WANT_REG is non-zero, we wish the condition to be relative to that
9175 register, if possible. Therefore, do not canonicalize the condition
9176 further. */
9178 rtx
9179 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9180 rtx insn;
9181 rtx cond;
9182 int reverse;
9183 rtx *earliest;
9184 rtx want_reg;
9186 enum rtx_code code;
9187 rtx prev = insn;
9188 rtx set;
9189 rtx tem;
9190 rtx op0, op1;
9191 int reverse_code = 0;
9192 enum machine_mode mode;
9194 code = GET_CODE (cond);
9195 mode = GET_MODE (cond);
9196 op0 = XEXP (cond, 0);
9197 op1 = XEXP (cond, 1);
9199 if (reverse)
9200 code = reversed_comparison_code (cond, insn);
9201 if (code == UNKNOWN)
9202 return 0;
9204 if (earliest)
9205 *earliest = insn;
9207 /* If we are comparing a register with zero, see if the register is set
9208 in the previous insn to a COMPARE or a comparison operation. Perform
9209 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9210 in cse.c */
9212 while (GET_RTX_CLASS (code) == '<'
9213 && op1 == CONST0_RTX (GET_MODE (op0))
9214 && op0 != want_reg)
9216 /* Set non-zero when we find something of interest. */
9217 rtx x = 0;
9219 #ifdef HAVE_cc0
9220 /* If comparison with cc0, import actual comparison from compare
9221 insn. */
9222 if (op0 == cc0_rtx)
9224 if ((prev = prev_nonnote_insn (prev)) == 0
9225 || GET_CODE (prev) != INSN
9226 || (set = single_set (prev)) == 0
9227 || SET_DEST (set) != cc0_rtx)
9228 return 0;
9230 op0 = SET_SRC (set);
9231 op1 = CONST0_RTX (GET_MODE (op0));
9232 if (earliest)
9233 *earliest = prev;
9235 #endif
9237 /* If this is a COMPARE, pick up the two things being compared. */
9238 if (GET_CODE (op0) == COMPARE)
9240 op1 = XEXP (op0, 1);
9241 op0 = XEXP (op0, 0);
9242 continue;
9244 else if (GET_CODE (op0) != REG)
9245 break;
9247 /* Go back to the previous insn. Stop if it is not an INSN. We also
9248 stop if it isn't a single set or if it has a REG_INC note because
9249 we don't want to bother dealing with it. */
9251 if ((prev = prev_nonnote_insn (prev)) == 0
9252 || GET_CODE (prev) != INSN
9253 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9254 break;
9256 set = set_of (op0, prev);
9258 if (set
9259 && (GET_CODE (set) != SET
9260 || !rtx_equal_p (SET_DEST (set), op0)))
9261 break;
9263 /* If this is setting OP0, get what it sets it to if it looks
9264 relevant. */
9265 if (set)
9267 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9269 /* ??? We may not combine comparisons done in a CCmode with
9270 comparisons not done in a CCmode. This is to aid targets
9271 like Alpha that have an IEEE compliant EQ instruction, and
9272 a non-IEEE compliant BEQ instruction. The use of CCmode is
9273 actually artificial, simply to prevent the combination, but
9274 should not affect other platforms.
9276 However, we must allow VOIDmode comparisons to match either
9277 CCmode or non-CCmode comparison, because some ports have
9278 modeless comparisons inside branch patterns.
9280 ??? This mode check should perhaps look more like the mode check
9281 in simplify_comparison in combine. */
9283 if ((GET_CODE (SET_SRC (set)) == COMPARE
9284 || (((code == NE
9285 || (code == LT
9286 && GET_MODE_CLASS (inner_mode) == MODE_INT
9287 && (GET_MODE_BITSIZE (inner_mode)
9288 <= HOST_BITS_PER_WIDE_INT)
9289 && (STORE_FLAG_VALUE
9290 & ((HOST_WIDE_INT) 1
9291 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9292 #ifdef FLOAT_STORE_FLAG_VALUE
9293 || (code == LT
9294 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9295 && (REAL_VALUE_NEGATIVE
9296 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9297 #endif
9299 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9300 && (((GET_MODE_CLASS (mode) == MODE_CC)
9301 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9302 || mode == VOIDmode || inner_mode == VOIDmode))
9303 x = SET_SRC (set);
9304 else if (((code == EQ
9305 || (code == GE
9306 && (GET_MODE_BITSIZE (inner_mode)
9307 <= HOST_BITS_PER_WIDE_INT)
9308 && GET_MODE_CLASS (inner_mode) == MODE_INT
9309 && (STORE_FLAG_VALUE
9310 & ((HOST_WIDE_INT) 1
9311 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9312 #ifdef FLOAT_STORE_FLAG_VALUE
9313 || (code == GE
9314 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9315 && (REAL_VALUE_NEGATIVE
9316 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9317 #endif
9319 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9320 && (((GET_MODE_CLASS (mode) == MODE_CC)
9321 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9322 || mode == VOIDmode || inner_mode == VOIDmode))
9325 reverse_code = 1;
9326 x = SET_SRC (set);
9328 else
9329 break;
9332 else if (reg_set_p (op0, prev))
9333 /* If this sets OP0, but not directly, we have to give up. */
9334 break;
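      /* Worked example of the tests above (register numbers hypothetical).
	 Suppose the previous insn is

	     (set (reg 100) (lt (reg 101) (reg 102)))

	 and the condition being canonicalized is (ne (reg 100) (const_int 0)).
	 Then X becomes (lt (reg 101) (reg 102)) and we continue with that
	 comparison directly.  For (eq (reg 100) (const_int 0)), REVERSE_CODE
	 is set instead and the reversed comparison (ge (reg 101) (reg 102))
	 is used.  */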
9336 if (x)
9338 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9339 code = GET_CODE (x);
9340 if (reverse_code)
9342 code = reversed_comparison_code (x, prev);
9343 if (code == UNKNOWN)
9344 return 0;
9345 reverse_code = 0;
9348 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9349 if (earliest)
9350 *earliest = prev;
9354 /* If constant is first, put it last. */
9355 if (CONSTANT_P (op0))
9356 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9358 /* If OP0 is the result of a comparison, we weren't able to find what
9359 was really being compared, so fail. */
9360 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9361 return 0;
9363 /* Canonicalize any ordered comparison with integers involving equality
9364 if we can do computations in the relevant mode and we do not
9365 overflow. */
9367 if (GET_CODE (op1) == CONST_INT
9368 && GET_MODE (op0) != VOIDmode
9369 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9371 HOST_WIDE_INT const_val = INTVAL (op1);
9372 unsigned HOST_WIDE_INT uconst_val = const_val;
9373 unsigned HOST_WIDE_INT max_val
9374 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9376 switch (code)
9378 case LE:
9379 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9380 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9381 break;
9383 /* When cross-compiling, const_val might be sign-extended from
9384 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
9385 case GE:
9386 if ((HOST_WIDE_INT) (const_val & max_val)
9387 != (((HOST_WIDE_INT) 1
9388 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9389 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9390 break;
9392 case LEU:
9393 if (uconst_val < max_val)
9394 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9395 break;
9397 case GEU:
9398 if (uconst_val != 0)
9399 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9400 break;
9402 default:
9403 break;
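      /* Illustrative rewrites from the switch above (QImode operand, so
	 GET_MODE_MASK is 255 and MAX_VAL >> 1 is 127):

	     (le  OP0 (const_int 10))  becomes  (lt  OP0 (const_int 11))
	     (leu OP0 (const_int 10))  becomes  (ltu OP0 (const_int 11))
	     (geu OP0 (const_int 10))  becomes  (gtu OP0 (const_int 9))

	 but (le OP0 (const_int 127)) is left alone, since adding one to
	 the constant would overflow the mode.  */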
9407 #ifdef HAVE_cc0
9408 /* Never return CC0; return zero instead. */
9409 if (op0 == cc0_rtx)
9410 return 0;
9411 #endif
9413 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9416 /* Given a jump insn JUMP, return the condition that will cause it to branch
9417 to its JUMP_LABEL. If the condition cannot be understood, or is an
9418 inequality floating-point comparison which needs to be reversed, 0 will
9419 be returned.
9421 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9422 insn used in locating the condition was found. If a replacement test
9423 of the condition is desired, it should be placed in front of that
9424 insn and we will be sure that the inputs are still valid. */
9427 get_condition (jump, earliest)
9428 rtx jump;
9429 rtx *earliest;
9431 rtx cond;
9432 int reverse;
9433 rtx set;
9435 /* If this is not a standard conditional jump, we can't parse it. */
9436 if (GET_CODE (jump) != JUMP_INSN
9437 || ! any_condjump_p (jump))
9438 return 0;
9439 set = pc_set (jump);
9441 cond = XEXP (SET_SRC (set), 0);
9443 /* If this branches to JUMP_LABEL when the condition is false, reverse
9444 the condition. */
9445 reverse
9446 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9447 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9449 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
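/* A minimal usage sketch for get_condition; the caller shown here is
   hypothetical, not part of this file:

	rtx earliest;
	rtx cond = get_condition (jump, &earliest);

   If COND is nonzero, it is a canonical comparison such as
   (lt (reg 100) (const_int 11)), and a replacement test of the condition
   may be emitted immediately before EARLIEST, where the comparison's
   inputs are known to still be valid.  */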
9452 /* Similar to above routine, except that we also put an invariant last
9453 unless both operands are invariants. */
9456 get_condition_for_loop (loop, x)
9457 const struct loop *loop;
9458 rtx x;
9460 rtx comparison = get_condition (x, (rtx*) 0);
9462 if (comparison == 0
9463 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9464 || loop_invariant_p (loop, XEXP (comparison, 1)))
9465 return comparison;
9467 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9468 XEXP (comparison, 1), XEXP (comparison, 0));
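/* For illustration (hypothetical registers): if the loop condition is
   (lt (reg 100) (reg 101)) where reg 100 is loop-invariant and reg 101
   is not, this returns the swapped form (gt (reg 101) (reg 100)), so
   that the invariant operand comes last.  */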
9471 /* Scan the function and determine whether it has indirect (computed) jumps.
9473 This is taken mostly from flow.c; similar code exists elsewhere
9474 in the compiler. It may be useful to put this into rtlanal.c. */
9475 static int
9476 indirect_jump_in_function_p (start)
9477 rtx start;
9479 rtx insn;
9481 for (insn = start; insn; insn = NEXT_INSN (insn))
9482 if (computed_jump_p (insn))
9483 return 1;
9485 return 0;
9488 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9489 documentation for LOOP_MEMS for the definition of `appropriate'.
9490 This function is called from prescan_loop via for_each_rtx. */
9492 static int
9493 insert_loop_mem (mem, data)
9494 rtx *mem;
9495 void *data ATTRIBUTE_UNUSED;
9497 struct loop_info *loop_info = data;
9498 int i;
9499 rtx m = *mem;
9501 if (m == NULL_RTX)
9502 return 0;
9504 switch (GET_CODE (m))
9506 case MEM:
9507 break;
9509 case CLOBBER:
9510 /* We're not interested in MEMs that are only clobbered. */
9511 return -1;
9513 case CONST_DOUBLE:
9514 /* We're not interested in the MEM associated with a
9515 CONST_DOUBLE, so there's no need to traverse into this. */
9516 return -1;
9518 case EXPR_LIST:
9519 /* We're not interested in any MEMs that only appear in notes. */
9520 return -1;
9522 default:
9523 /* This is not a MEM. */
9524 return 0;
9527 /* See if we've already seen this MEM. */
9528 for (i = 0; i < loop_info->mems_idx; ++i)
9529 if (rtx_equal_p (m, loop_info->mems[i].mem))
9531 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9532 /* The modes of the two memory accesses are different. If
9533 this happens, something tricky is going on, and we just
9534 don't optimize accesses to this MEM. */
9535 loop_info->mems[i].optimize = 0;
9537 return 0;
9540 /* Resize the array, if necessary. */
9541 if (loop_info->mems_idx == loop_info->mems_allocated)
9543 if (loop_info->mems_allocated != 0)
9544 loop_info->mems_allocated *= 2;
9545 else
9546 loop_info->mems_allocated = 32;
9548 loop_info->mems = (loop_mem_info *)
9549 xrealloc (loop_info->mems,
9550 loop_info->mems_allocated * sizeof (loop_mem_info));
9553 /* Actually insert the MEM. */
9554 loop_info->mems[loop_info->mems_idx].mem = m;
9555 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9556 because we can't put it in a register. We still store it in the
9557 table, though, so that if we see the same address later, but in a
9558 non-BLK mode, we'll not think we can optimize it at that point. */
9559 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9560 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9561 ++loop_info->mems_idx;
9563 return 0;
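/* For illustration: a MEM reached through an insn pattern, such as the
   source of (set (reg 100) (mem:SI (symbol_ref "x"))), is recorded
   here, while MEMs appearing only inside a CLOBBER, a CONST_DOUBLE or
   a note's EXPR_LIST are skipped by the -1 returns above.  */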
9567 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9569 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9570 register that is modified by an insn between FROM and TO. If the
9571 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9572 more, stop incrementing it, to avoid overflow.
9574 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9575 register I is used, if it is only used once. Otherwise, it is set
9576 to 0 (for no uses) or const0_rtx for more than one use. This
9577 parameter may be zero, in which case this processing is not done.
9579 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9580 optimize register I. */
9582 static void
9583 loop_regs_scan (loop, extra_size)
9584 const struct loop *loop;
9585 int extra_size;
9587 struct loop_regs *regs = LOOP_REGS (loop);
9588 int old_nregs;
9589 /* last_set[n] is nonzero iff reg n has been set in the current
9590 basic block. In that case, it is the insn that last set reg n. */
9591 rtx *last_set;
9592 rtx insn;
9593 int i;
9595 old_nregs = regs->num;
9596 regs->num = max_reg_num ();
9598 /* Grow the regs array if not allocated or too small. */
9599 if (regs->num >= regs->size)
9601 regs->size = regs->num + extra_size;
9603 regs->array = (struct loop_reg *)
9604 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9606 /* Zero the new elements. */
9607 memset (regs->array + old_nregs, 0,
9608 (regs->size - old_nregs) * sizeof (*regs->array));
9611 /* Clear previously scanned fields but do not clear n_times_set. */
9612 for (i = 0; i < old_nregs; i++)
9614 regs->array[i].set_in_loop = 0;
9615 regs->array[i].may_not_optimize = 0;
9616 regs->array[i].single_usage = NULL_RTX;
9619 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9621 /* Scan the loop, recording register usage. */
9622 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9623 insn = NEXT_INSN (insn))
9625 if (INSN_P (insn))
9627 /* Record registers that have exactly one use. */
9628 find_single_use_in_loop (regs, insn, PATTERN (insn));
9630 /* Include uses in REG_EQUAL notes. */
9631 if (REG_NOTES (insn))
9632 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9634 if (GET_CODE (PATTERN (insn)) == SET
9635 || GET_CODE (PATTERN (insn)) == CLOBBER)
9636 count_one_set (regs, insn, PATTERN (insn), last_set);
9637 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9639 int i;
9640 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9641 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9642 last_set);
9646 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9647 memset (last_set, 0, regs->num * sizeof (rtx));
9650 /* Invalidate all hard registers clobbered by calls. With one exception:
9651 a call-clobbered PIC register is still function-invariant for our
9652 purposes, since we can hoist any PIC calculations out of the loop.
9653 Thus the call to rtx_varies_p. */
9654 if (LOOP_INFO (loop)->has_call)
9655 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9656 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9657 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9659 regs->array[i].may_not_optimize = 1;
9660 regs->array[i].set_in_loop = 1;
9663 #ifdef AVOID_CCMODE_COPIES
9664 /* Don't try to move insns which set CC registers if we should not
9665 create CCmode register copies. */
9666 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9667 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9668 regs->array[i].may_not_optimize = 1;
9669 #endif
9671 /* Set regs->array[I].n_times_set for the new registers. */
9672 for (i = old_nregs; i < regs->num; i++)
9673 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9675 free (last_set);
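/* Illustrative result of a scan (register numbers hypothetical): for a
   loop body containing

       (set (reg 100) ...)
       (set (reg 100) ...)
       (set (reg 101) ...)

   regs->array[100].set_in_loop becomes 2 and regs->array[101].set_in_loop
   becomes 1; if reg 101 is used in exactly one insn, that insn is also
   recorded in regs->array[101].single_usage.  */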
9678 /* Returns the number of real INSNs in the LOOP. */
9680 static int
9681 count_insns_in_loop (loop)
9682 const struct loop *loop;
9684 int count = 0;
9685 rtx insn;
9687 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9688 insn = NEXT_INSN (insn))
9689 if (INSN_P (insn))
9690 ++count;
9692 return count;
9695 /* Move MEMs into registers for the duration of the loop. */
9697 static void
9698 load_mems (loop)
9699 const struct loop *loop;
9701 struct loop_info *loop_info = LOOP_INFO (loop);
9702 struct loop_regs *regs = LOOP_REGS (loop);
9703 int maybe_never = 0;
9704 int i;
9705 rtx p, prev_ebb_head;
9706 rtx label = NULL_RTX;
9707 rtx end_label;
9708 /* Nonzero if the next instruction may never be executed. */
9709 int next_maybe_never = 0;
9710 unsigned int last_max_reg = max_reg_num ();
9712 if (loop_info->mems_idx == 0)
9713 return;
9715 /* We cannot use next_label here because it skips over normal insns. */
9716 end_label = next_nonnote_insn (loop->end);
9717 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9718 end_label = NULL_RTX;
9720 /* Check to see if it's possible that some instructions in the loop are
9721 never executed. Also check if there is a goto out of the loop other
9722 than right after the end of the loop. */
9723 for (p = next_insn_in_loop (loop, loop->scan_start);
9724 p != NULL_RTX;
9725 p = next_insn_in_loop (loop, p))
9727 if (GET_CODE (p) == CODE_LABEL)
9728 maybe_never = 1;
9729 else if (GET_CODE (p) == JUMP_INSN
9730 /* If we enter the loop in the middle, and scan
9731 around to the beginning, don't set maybe_never
9732 for that. This must be an unconditional jump,
9733 otherwise the code at the top of the loop might
9734 never be executed. Unconditional jumps are
9735 followed by a barrier and then the loop end.  */
9736 && ! (GET_CODE (p) == JUMP_INSN
9737 && JUMP_LABEL (p) == loop->top
9738 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9739 && any_uncondjump_p (p)))
9741 /* If this is a jump outside of the loop but not right
9742 after the end of the loop, we would have to emit new fixup
9743 sequences for each such label. */
9744 if (/* If we can't tell where control might go when this
9745 JUMP_INSN is executed, we must be conservative. */
9746 !JUMP_LABEL (p)
9747 || (JUMP_LABEL (p) != end_label
9748 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9749 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9750 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9751 return;
9753 if (!any_condjump_p (p))
9754 /* Something complicated. */
9755 maybe_never = 1;
9756 else
9757 /* If there are any more instructions in the loop, they
9758 might not be reached. */
9759 next_maybe_never = 1;
9761 else if (next_maybe_never)
9762 maybe_never = 1;
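  /* Example of the scan above (hypothetical loop body):

	 (jump_insn ...)   conditional branch to L1
	 (insn A)
       L1:

     the conditional jump sets NEXT_MAYBE_NEVER, so MAYBE_NEVER becomes
     nonzero at insn A, and the label L1 forces MAYBE_NEVER as well,
     since control may enter at L1 without executing the insns before
     it in the loop.  */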
9765 /* Find start of the extended basic block that enters the loop. */
9766 for (p = loop->start;
9767 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9768 p = PREV_INSN (p))
9770 prev_ebb_head = p;
9772 cselib_init ();
9774 /* Build table of mems that get set to constant values before the
9775 loop. */
9776 for (; p != loop->start; p = NEXT_INSN (p))
9777 cselib_process_insn (p);
9779 /* Actually move the MEMs. */
9780 for (i = 0; i < loop_info->mems_idx; ++i)
9782 regset_head load_copies;
9783 regset_head store_copies;
9784 int written = 0;
9785 rtx reg;
9786 rtx mem = loop_info->mems[i].mem;
9787 rtx mem_list_entry;
9789 if (MEM_VOLATILE_P (mem)
9790 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9791 /* There's no telling whether or not MEM is modified. */
9792 loop_info->mems[i].optimize = 0;
9794 /* Go through the MEMs written to in the loop to see if this
9795 one is aliased by one of them. */
9796 mem_list_entry = loop_info->store_mems;
9797 while (mem_list_entry)
9799 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9800 written = 1;
9801 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9802 mem, rtx_varies_p))
9804 /* MEM is indeed aliased by this store. */
9805 loop_info->mems[i].optimize = 0;
9806 break;
9808 mem_list_entry = XEXP (mem_list_entry, 1);
9811 if (flag_float_store && written
9812 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9813 loop_info->mems[i].optimize = 0;
9815 /* If this MEM is written to, we must be sure that there
9816 are no reads from another MEM that aliases this one. */
9817 if (loop_info->mems[i].optimize && written)
9819 int j;
9821 for (j = 0; j < loop_info->mems_idx; ++j)
9823 if (j == i)
9824 continue;
9825 else if (true_dependence (mem,
9826 VOIDmode,
9827 loop_info->mems[j].mem,
9828 rtx_varies_p))
9830 /* It's not safe to hoist loop_info->mems[i] out of
9831 the loop because writes to it might not be
9832 seen by reads from loop_info->mems[j]. */
9833 loop_info->mems[i].optimize = 0;
9834 break;
9839 if (maybe_never && may_trap_p (mem))
9840 /* We can't access the MEM outside the loop; it might
9841 cause a trap that wouldn't have happened otherwise. */
9842 loop_info->mems[i].optimize = 0;
9844 if (!loop_info->mems[i].optimize)
9845 /* We thought we were going to lift this MEM out of the
9846 loop, but later discovered that we could not. */
9847 continue;
9849 INIT_REG_SET (&load_copies);
9850 INIT_REG_SET (&store_copies);
9852 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9853 order to keep scan_loop from moving stores to this MEM
9854 out of the loop just because this REG is neither a
9855 user-variable nor used in the loop test. */
9856 reg = gen_reg_rtx (GET_MODE (mem));
9857 REG_USERVAR_P (reg) = 1;
9858 loop_info->mems[i].reg = reg;
9860 /* Now, replace all references to the MEM with the
9861 corresponding pseudos. */
9862 maybe_never = 0;
9863 for (p = next_insn_in_loop (loop, loop->scan_start);
9864 p != NULL_RTX;
9865 p = next_insn_in_loop (loop, p))
9867 if (INSN_P (p))
9869 rtx set;
9871 set = single_set (p);
9873 /* See if this copies the mem into a register that isn't
9874 modified afterwards. We'll try to do copy propagation
9875 a little further on. */
9876 if (set
9877 /* @@@ This test is _way_ too conservative. */
9878 && ! maybe_never
9879 && GET_CODE (SET_DEST (set)) == REG
9880 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9881 && REGNO (SET_DEST (set)) < last_max_reg
9882 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9883 && rtx_equal_p (SET_SRC (set), mem))
9884 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9886 /* See if this copies the mem from a register that isn't
9887 modified afterwards. We'll try to remove the
9888 redundant copy later on by doing a little register
9889 renaming and copy propagation. This will help
9890 to untangle things for the BIV detection code. */
9891 if (set
9892 && ! maybe_never
9893 && GET_CODE (SET_SRC (set)) == REG
9894 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9895 && REGNO (SET_SRC (set)) < last_max_reg
9896 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9897 && rtx_equal_p (SET_DEST (set), mem))
9898 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9900 /* If this is a call which uses / clobbers this memory
9901 location, we must not change the interface here. */
9902 if (GET_CODE (p) == CALL_INSN
9903 && reg_mentioned_p (loop_info->mems[i].mem,
9904 CALL_INSN_FUNCTION_USAGE (p)))
9906 cancel_changes (0);
9907 loop_info->mems[i].optimize = 0;
9908 break;
9910 else
9911 /* Replace the memory reference with the shadow register. */
9912 replace_loop_mems (p, loop_info->mems[i].mem,
9913 loop_info->mems[i].reg);
9916 if (GET_CODE (p) == CODE_LABEL
9917 || GET_CODE (p) == JUMP_INSN)
9918 maybe_never = 1;
9921 if (! loop_info->mems[i].optimize)
9922 ; /* We found we couldn't do the replacement, so do nothing. */
9923 else if (! apply_change_group ())
9924 /* We couldn't replace all occurrences of the MEM. */
9925 loop_info->mems[i].optimize = 0;
9926 else
9928 /* Load the memory immediately before LOOP->START, which is
9929 the NOTE_LOOP_BEG. */
9930 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9931 rtx set;
9932 rtx best = mem;
9933 int j;
9934 struct elt_loc_list *const_equiv = 0;
9936 if (e)
9938 struct elt_loc_list *equiv;
9939 struct elt_loc_list *best_equiv = 0;
9940 for (equiv = e->locs; equiv; equiv = equiv->next)
9942 if (CONSTANT_P (equiv->loc))
9943 const_equiv = equiv;
9944 else if (GET_CODE (equiv->loc) == REG
9945 /* Extending hard register lifetimes causes a crash
9946 on SRC targets. Doing so on non-SRC targets is
9947 probably also not a good idea, since we most
9948 probably have a pseudo-register equivalence as
9949 well.  */
9950 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9951 best_equiv = equiv;
9953 /* Use the constant equivalence if that is cheap enough. */
9954 if (! best_equiv)
9955 best_equiv = const_equiv;
9956 else if (const_equiv
9957 && (rtx_cost (const_equiv->loc, SET)
9958 <= rtx_cost (best_equiv->loc, SET)))
9960 best_equiv = const_equiv;
9961 const_equiv = 0;
9964 /* If best_equiv is nonzero, we know that MEM is set to a
9965 constant or register before the loop. We will use this
9966 knowledge to initialize the shadow register with that
9967 constant or reg rather than by loading from MEM. */
9968 if (best_equiv)
9969 best = copy_rtx (best_equiv->loc);
9972 set = gen_move_insn (reg, best);
9973 set = loop_insn_hoist (loop, set);
9974 if (REG_P (best))
9976 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9977 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9979 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9980 break;
9984 if (const_equiv)
9985 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9987 if (written)
9989 if (label == NULL_RTX)
9991 label = gen_label_rtx ();
9992 emit_label_after (label, loop->end);
9995 /* Store the memory immediately after END, which is
9996 the NOTE_LOOP_END. */
9997 set = gen_move_insn (copy_rtx (mem), reg);
9998 loop_insn_emit_after (loop, 0, label, set);
10001 if (loop_dump_stream)
10003 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10004 REGNO (reg), (written ? "r/w" : "r/o"));
10005 print_rtl (loop_dump_stream, mem);
10006 fputc ('\n', loop_dump_stream);
10009 /* Attempt a bit of copy propagation. This helps untangle the
10010 data flow, and enables {basic,general}_induction_var to find
10011 more bivs/givs. */
10012 EXECUTE_IF_SET_IN_REG_SET
10013 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10015 try_copy_prop (loop, reg, j);
10017 CLEAR_REG_SET (&load_copies);
10019 EXECUTE_IF_SET_IN_REG_SET
10020 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10022 try_swap_copy_prop (loop, reg, j);
10024 CLEAR_REG_SET (&store_copies);
10028 if (label != NULL_RTX && end_label != NULL_RTX)
10030 /* Now, we need to replace all references to the previous exit
10031 label with the new one. */
10032 rtx_pair rr;
10033 rr.r1 = end_label;
10034 rr.r2 = label;
10036 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10038 for_each_rtx (&p, replace_label, &rr);
10040 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10041 field. This is not handled by for_each_rtx because it doesn't
10042 handle unprinted ('0') fields. We need to update JUMP_LABEL
10043 because the immediately following unroll pass will use it.
10044 replace_label would not work anyways, because that only handles
10045 LABEL_REFs. */
10046 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10047 JUMP_LABEL (p) = label;
10051 cselib_finish ();
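/* For illustration, the net effect of load_mems on a MEM that is both
   read and written in the loop, assuming all of the checks above
   passed (names hypothetical):

       before:                          after:
					  (set (reg R) (mem M))    hoisted load
	 NOTE_INSN_LOOP_BEG              NOTE_INSN_LOOP_BEG
	 ... uses of (mem M) ...           ... uses of (reg R) ...
	 (set (mem M) ...)                 (set (reg R) ...)
	 NOTE_INSN_LOOP_END              NOTE_INSN_LOOP_END
					LABEL:
					  (set (mem M) (reg R))    sunk store

   with references to the old exit label inside the loop redirected to
   LABEL so that the store is executed on every exit.  */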
10054 /* For communication between note_reg_stored and its caller. */
10055 struct note_reg_stored_arg
10057 int set_seen;
10058 rtx reg;
10061 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10062 is equal to ARG. */
10063 static void
10064 note_reg_stored (x, setter, arg)
10065 rtx x, setter ATTRIBUTE_UNUSED;
10066 void *arg;
10068 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10069 if (t->reg == x)
10070 t->set_seen = 1;
10073 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10074 There must be exactly one insn that sets this pseudo; it will be
10075 deleted if all replacements succeed and we can prove that the register
10076 is not used after the loop. */
10078 static void
10079 try_copy_prop (loop, replacement, regno)
10080 const struct loop *loop;
10081 rtx replacement;
10082 unsigned int regno;
10084 /* This is the reg that we are copying from. */
10085 rtx reg_rtx = regno_reg_rtx[regno];
10086 rtx init_insn = 0;
10087 rtx insn;
10088 /* These help keep track of whether we replaced all uses of the reg. */
10089 int replaced_last = 0;
10090 int store_is_first = 0;
10092 for (insn = next_insn_in_loop (loop, loop->scan_start);
10093 insn != NULL_RTX;
10094 insn = next_insn_in_loop (loop, insn))
10096 rtx set;
10098 /* Only substitute within one extended basic block from the initializing
10099 insn. */
10100 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10101 break;
10103 if (! INSN_P (insn))
10104 continue;
10106 /* Is this the initializing insn? */
10107 set = single_set (insn);
10108 if (set
10109 && GET_CODE (SET_DEST (set)) == REG
10110 && REGNO (SET_DEST (set)) == regno)
10112 if (init_insn)
10113 abort ();
10115 init_insn = insn;
10116 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10117 store_is_first = 1;
10120 /* Only substitute after seeing the initializing insn. */
10121 if (init_insn && insn != init_insn)
10123 struct note_reg_stored_arg arg;
10125 replace_loop_regs (insn, reg_rtx, replacement);
10126 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10127 replaced_last = 1;
10129 /* Stop replacing when REPLACEMENT is modified. */
10130 arg.reg = replacement;
10131 arg.set_seen = 0;
10132 note_stores (PATTERN (insn), note_reg_stored, &arg);
10133 if (arg.set_seen)
10135 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10137 /* It is possible that we've turned a previously valid REG_EQUAL note
10138 into an invalid one, since we changed REGNO to REPLACEMENT; unlike
10139 REGNO, REPLACEMENT is modified here, so the note's meaning changes.  */
10140 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10141 remove_note (insn, note);
10142 break;
10146 if (! init_insn)
10147 abort ();
10148 if (apply_change_group ())
10150 if (loop_dump_stream)
10151 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10152 if (store_is_first && replaced_last)
10154 rtx first;
10155 rtx retval_note;
10157 /* Assume we're just deleting INIT_INSN. */
10158 first = init_insn;
10159 /* Look for REG_RETVAL note. If we're deleting the end of
10160 the libcall sequence, the whole sequence can go. */
10161 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10162 /* If we found a REG_RETVAL note, find the first instruction
10163 in the sequence. */
10164 if (retval_note)
10165 first = XEXP (retval_note, 0);
10167 /* Delete the instructions. */
10168 loop_delete_insns (first, init_insn);
10170 if (loop_dump_stream)
10171 fprintf (loop_dump_stream, ".\n");
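/* For illustration (hypothetical registers): with REGNO = 100 and
   REPLACEMENT = (reg 200), an extended basic block such as

       (set (reg 100) (reg 200))     initializing insn
       ... uses of (reg 100) ...

   has its uses rewritten to (reg 200); if the initializing insn was
   reg 100's first occurrence and the insn containing its last use was
   among those rewritten, the initializing insn (or the whole libcall
   sequence ending in it) is deleted.  */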
10175 /* Replace all the instructions from FIRST up to and including LAST
10176 with NOTE_INSN_DELETED notes. */
10178 static void
10179 loop_delete_insns (first, last)
10180 rtx first;
10181 rtx last;
10183 while (1)
10185 if (loop_dump_stream)
10186 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10187 INSN_UID (first));
10188 delete_insn (first);
10190 /* If this was the LAST instruction we're supposed to delete,
10191 we're done. */
10192 if (first == last)
10193 break;
10195 first = NEXT_INSN (first);
10199 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10200 loop LOOP if the order of the sets of these registers can be
10201 swapped. There must be exactly one insn within the loop that sets
10202 this pseudo, followed immediately by a move insn that copies the
10203 pseudo to REPLACEMENT.  */
10204 static void
10205 try_swap_copy_prop (loop, replacement, regno)
10206 const struct loop *loop;
10207 rtx replacement;
10208 unsigned int regno;
10210 rtx insn;
10211 rtx set = NULL_RTX;
10212 unsigned int new_regno;
10214 new_regno = REGNO (replacement);
10216 for (insn = next_insn_in_loop (loop, loop->scan_start);
10217 insn != NULL_RTX;
10218 insn = next_insn_in_loop (loop, insn))
10220 /* Search for the insn that copies REGNO to NEW_REGNO.  */
10221 if (INSN_P (insn)
10222 && (set = single_set (insn))
10223 && GET_CODE (SET_DEST (set)) == REG
10224 && REGNO (SET_DEST (set)) == new_regno
10225 && GET_CODE (SET_SRC (set)) == REG
10226 && REGNO (SET_SRC (set)) == regno)
10227 break;
10230 if (insn != NULL_RTX)
10232 rtx prev_insn;
10233 rtx prev_set;
10235 /* Some DEF-USE info would come in handy here to make this
10236 function more general. For now, just check the previous insn
10237 which is the most likely candidate for setting REGNO. */
10239 prev_insn = PREV_INSN (insn);
10241 if (INSN_P (prev_insn)
10242 && (prev_set = single_set (prev_insn))
10243 && GET_CODE (SET_DEST (prev_set)) == REG
10244 && REGNO (SET_DEST (prev_set)) == regno)
10246 /* We have:
10247 (set (reg regno) (expr))
10248 (set (reg new_regno) (reg regno))
10250 so try converting this to:
10251 (set (reg new_regno) (expr))
10252 (set (reg regno) (reg new_regno))
10254 The former construct is often generated when a global
10255 variable used for an induction variable is shadowed by a
10256 register (NEW_REGNO). The latter construct improves the
10257 chances of GIV replacement and BIV elimination. */
10259 validate_change (prev_insn, &SET_DEST (prev_set),
10260 replacement, 1);
10261 validate_change (insn, &SET_DEST (set),
10262 SET_SRC (set), 1);
10263 validate_change (insn, &SET_SRC (set),
10264 replacement, 1);
10266 if (apply_change_group ())
10268 if (loop_dump_stream)
10269 fprintf (loop_dump_stream,
10270 " Swapped set of reg %d at %d with reg %d at %d.\n",
10271 regno, INSN_UID (insn),
10272 new_regno, INSN_UID (prev_insn));
10274 /* Update first use of REGNO. */
10275 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10276 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10278 /* Now perform copy propagation to hopefully
10279 remove all uses of REGNO within the loop. */
10280 try_copy_prop (loop, replacement, regno);
10286 /* Replace MEM with its associated pseudo register. This function is
10287 called from load_mems via for_each_rtx. DATA is actually a pointer
10288 to a structure describing the instruction currently being scanned
10289 and the MEM we are currently replacing. */
10291 static int
10292 replace_loop_mem (mem, data)
10293 rtx *mem;
10294 void *data;
10296 loop_replace_args *args = (loop_replace_args *) data;
10297 rtx m = *mem;
10299 if (m == NULL_RTX)
10300 return 0;
10302 switch (GET_CODE (m))
10304 case MEM:
10305 break;
10307 case CONST_DOUBLE:
10308 /* We're not interested in the MEM associated with a
10309 CONST_DOUBLE, so there's no need to traverse into one. */
10310 return -1;
10312 default:
10313 /* This is not a MEM. */
10314 return 0;
10317 if (!rtx_equal_p (args->match, m))
10318 /* This is not the MEM we are currently replacing. */
10319 return 0;
10321 /* Actually replace the MEM. */
10322 validate_change (args->insn, mem, args->replacement, 1);
10324 return 0;
10327 static void
10328 replace_loop_mems (insn, mem, reg)
10329 rtx insn;
10330 rtx mem;
10331 rtx reg;
10333 loop_replace_args args;
10335 args.insn = insn;
10336 args.match = mem;
10337 args.replacement = reg;
10339 for_each_rtx (&insn, replace_loop_mem, &args);
10342 /* Replace one register with another. Called through for_each_rtx; PX points
10343 to the rtx being scanned. DATA is actually a pointer to
10344 a structure of arguments. */
10346 static int
10347 replace_loop_reg (px, data)
10348 rtx *px;
10349 void *data;
10351 rtx x = *px;
10352 loop_replace_args *args = (loop_replace_args *) data;
10354 if (x == NULL_RTX)
10355 return 0;
10357 if (x == args->match)
10358 validate_change (args->insn, px, args->replacement, 1);
10360 return 0;
10363 static void
10364 replace_loop_regs (insn, reg, replacement)
10365 rtx insn;
10366 rtx reg;
10367 rtx replacement;
10369 loop_replace_args args;
10371 args.insn = insn;
10372 args.match = reg;
10373 args.replacement = replacement;
10375 for_each_rtx (&insn, replace_loop_reg, &args);
10378 /* Replace occurrences of the old exit label for the loop with the new
10379 one. DATA is an rtx_pair containing the old and new labels,
10380 respectively. */
10382 static int
10383 replace_label (x, data)
10384 rtx *x;
10385 void *data;
10387 rtx l = *x;
10388 rtx old_label = ((rtx_pair *) data)->r1;
10389 rtx new_label = ((rtx_pair *) data)->r2;
10391 if (l == NULL_RTX)
10392 return 0;
10394 if (GET_CODE (l) != LABEL_REF)
10395 return 0;
10397 if (XEXP (l, 0) != old_label)
10398 return 0;
10400 XEXP (l, 0) = new_label;
10401 ++LABEL_NUSES (new_label);
10402 --LABEL_NUSES (old_label);
10404 return 0;
10407 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10408 (ignored in the interim). */
10410 static rtx
10411 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10412 const struct loop *loop ATTRIBUTE_UNUSED;
10413 basic_block where_bb ATTRIBUTE_UNUSED;
10414 rtx where_insn;
10415 rtx pattern;
10417 return emit_insn_after (pattern, where_insn);
10421 /* If WHERE_INSN is non-zero, emit an insn for PATTERN before WHERE_INSN
10422 in basic block WHERE_BB (ignored in the interim) within the loop;
10423 otherwise, hoist PATTERN into the loop pre-header.  */
10426 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10427 const struct loop *loop;
10428 basic_block where_bb ATTRIBUTE_UNUSED;
10429 rtx where_insn;
10430 rtx pattern;
10432 if (! where_insn)
10433 return loop_insn_hoist (loop, pattern);
10434 return emit_insn_before (pattern, where_insn);
10438 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10439 WHERE_BB (ignored in the interim) within the loop. */
10441 static rtx
10442 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10443 const struct loop *loop ATTRIBUTE_UNUSED;
10444 basic_block where_bb ATTRIBUTE_UNUSED;
10445 rtx where_insn;
10446 rtx pattern;
10448 return emit_call_insn_before (pattern, where_insn);
10452 /* Hoist insn for PATTERN into the loop pre-header. */
10455 loop_insn_hoist (loop, pattern)
10456 const struct loop *loop;
10457 rtx pattern;
10459 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10463 /* Hoist call insn for PATTERN into the loop pre-header. */
10465 static rtx
10466 loop_call_insn_hoist (loop, pattern)
10467 const struct loop *loop;
10468 rtx pattern;
10470 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10474 /* Sink insn for PATTERN after the loop end. */
10477 loop_insn_sink (loop, pattern)
10478 const struct loop *loop;
10479 rtx pattern;
10481 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10484 /* bl->final_value can be either general_operand or a PLUS of general_operand
10485 and a constant. Emit a sequence of instructions to load it into REG.  */
10486 static rtx
10487 gen_load_of_final_value (reg, final_value)
10488 rtx reg, final_value;
10490 rtx seq;
10491 start_sequence ();
10492 final_value = force_operand (final_value, reg);
10493 if (final_value != reg)
10494 emit_move_insn (reg, final_value);
10495 seq = gen_sequence ();
10496 end_sequence ();
10497 return seq;
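/* Example (hypothetical operands): for FINAL_VALUE =
   (plus (reg 100) (const_int 4)), the returned sequence amounts to

       (set (reg REG) (plus (reg 100) (const_int 4)))

   although force_operand may split the computation further on targets
   where such a PLUS is not a valid move source.  */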
10500 /* If the loop has multiple exits, emit insn for PATTERN before the
10501 loop to ensure that it will always be executed no matter how the
10502 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10503 since this is slightly more efficient. */
10505 static rtx
10506 loop_insn_sink_or_swim (loop, pattern)
10507 const struct loop *loop;
10508 rtx pattern;
10510 if (loop->exit_count)
10511 return loop_insn_hoist (loop, pattern);
10512 else
10513 return loop_insn_sink (loop, pattern);
10516 static void
10517 loop_ivs_dump (loop, file, verbose)
10518 const struct loop *loop;
10519 FILE *file;
10520 int verbose;
10522 struct iv_class *bl;
10523 int iv_num = 0;
10525 if (! loop || ! file)
10526 return;
10528 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10529 iv_num++;
10531 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10533 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10535 loop_iv_class_dump (bl, file, verbose);
10536 fputc ('\n', file);
10541 static void
10542 loop_iv_class_dump (bl, file, verbose)
10543 const struct iv_class *bl;
10544 FILE *file;
10545 int verbose ATTRIBUTE_UNUSED;
10547 struct induction *v;
10548 rtx incr;
10549 int i;
10551 if (! bl || ! file)
10552 return;
10554 fprintf (file, "IV class for reg %d, benefit %d\n",
10555 bl->regno, bl->total_benefit);
10557 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10558 if (bl->initial_value)
10560 fprintf (file, ", init val: ");
10561 print_simple_rtl (file, bl->initial_value);
10563 if (bl->initial_test)
10565 fprintf (file, ", init test: ");
10566 print_simple_rtl (file, bl->initial_test);
10568 fputc ('\n', file);
10570 if (bl->final_value)
10572 fprintf (file, " Final val: ");
10573 print_simple_rtl (file, bl->final_value);
10574 fputc ('\n', file);
10577 if ((incr = biv_total_increment (bl)))
10579 fprintf (file, " Total increment: ");
10580 print_simple_rtl (file, incr);
10581 fputc ('\n', file);
10584 /* List the increments. */
10585 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10587 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10588 print_simple_rtl (file, v->add_val);
10589 fputc ('\n', file);
10592 /* List the givs. */
10593 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10595 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10596 i, INSN_UID (v->insn), v->benefit);
10597 if (v->giv_type == DEST_ADDR)
10598 print_simple_rtl (file, v->mem);
10599 else
10600 print_simple_rtl (file, single_set (v->insn));
10601 fputc ('\n', file);
10606 static void
10607 loop_biv_dump (v, file, verbose)
10608 const struct induction *v;
10609 FILE *file;
10610 int verbose;
10612 if (! v || ! file)
10613 return;
10615 fprintf (file,
10616 "Biv %d: insn %d",
10617 REGNO (v->dest_reg), INSN_UID (v->insn));
10618 fprintf (file, " const ");
10619 print_simple_rtl (file, v->add_val);
10621 if (verbose && v->final_value)
10623 fputc ('\n', file);
10624 fprintf (file, " final ");
10625 print_simple_rtl (file, v->final_value);
10628 fputc ('\n', file);
10632 static void
10633 loop_giv_dump (v, file, verbose)
10634 const struct induction *v;
10635 FILE *file;
10636 int verbose;
10638 if (! v || ! file)
10639 return;
10641 if (v->giv_type == DEST_REG)
10642 fprintf (file, "Giv %d: insn %d",
10643 REGNO (v->dest_reg), INSN_UID (v->insn));
10644 else
10645 fprintf (file, "Dest address: insn %d",
10646 INSN_UID (v->insn));
10648 fprintf (file, " src reg %d benefit %d",
10649 REGNO (v->src_reg), v->benefit);
10650 fprintf (file, " lifetime %d",
10651 v->lifetime);
10653 if (v->replaceable)
10654 fprintf (file, " replaceable");
10656 if (v->no_const_addval)
10657 fprintf (file, " ncav");
10659 if (v->ext_dependent)
10661 switch (GET_CODE (v->ext_dependent))
10663 case SIGN_EXTEND:
10664 fprintf (file, " ext se");
10665 break;
10666 case ZERO_EXTEND:
10667 fprintf (file, " ext ze");
10668 break;
10669 case TRUNCATE:
10670 fprintf (file, " ext tr");
10671 break;
10672 default:
10673 abort ();
10677 fputc ('\n', file);
10678 fprintf (file, " mult ");
10679 print_simple_rtl (file, v->mult_val);
10681 fputc ('\n', file);
10682 fprintf (file, " add ");
10683 print_simple_rtl (file, v->add_val);
10685 if (verbose && v->final_value)
10687 fputc ('\n', file);
10688 fprintf (file, " final ");
10689 print_simple_rtl (file, v->final_value);
10692 fputc ('\n', file);
10696 void
10697 debug_ivs (loop)
10698 const struct loop *loop;
10700 loop_ivs_dump (loop, stderr, 1);
10704 void
10705 debug_iv_class (bl)
10706 const struct iv_class *bl;
10708 loop_iv_class_dump (bl, stderr, 1);
10712 void
10713 debug_biv (v)
10714 const struct induction *v;
10716 loop_biv_dump (v, stderr, 1);
10720 void
10721 debug_giv (v)
10722 const struct induction *v;
10724 loop_giv_dump (v, stderr, 1);
10728 #define LOOP_BLOCK_NUM_1(INSN) \
10729 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10731 /* The notes do not have an assigned block, so look at the next insn. */
10732 #define LOOP_BLOCK_NUM(INSN) \
10733 ((INSN) ? (GET_CODE (INSN) == NOTE \
10734 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10735 : LOOP_BLOCK_NUM_1 (INSN)) \
10736 : -1)
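/* For example, applied to a NOTE_INSN_LOOP_BEG note, LOOP_BLOCK_NUM
   yields the block number of the first real insn after the note;
   both macros yield -1 for a null INSN.  */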
10738 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10740 static void
10741 loop_dump_aux (loop, file, verbose)
10742 const struct loop *loop;
10743 FILE *file;
10744 int verbose ATTRIBUTE_UNUSED;
10746 rtx label;
10748 if (! loop || ! file)
10749 return;
10751 /* Print diagnostics to compare our concept of a loop with
10752 what the loop notes say. */
10753 if (! PREV_INSN (loop->first->head)
10754 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10755 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10756 != NOTE_INSN_LOOP_BEG)
10757 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10758 INSN_UID (PREV_INSN (loop->first->head)));
10759 if (! NEXT_INSN (loop->last->end)
10760 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10761 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10762 != NOTE_INSN_LOOP_END)
10763 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10764 INSN_UID (NEXT_INSN (loop->last->end)));
10766 if (loop->start)
10768 fprintf (file,
10769 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10770 LOOP_BLOCK_NUM (loop->start),
10771 LOOP_INSN_UID (loop->start),
10772 LOOP_BLOCK_NUM (loop->cont),
10773 LOOP_INSN_UID (loop->cont),
10774 LOOP_BLOCK_NUM (loop->cont),
10775 LOOP_INSN_UID (loop->cont),
10776 LOOP_BLOCK_NUM (loop->vtop),
10777 LOOP_INSN_UID (loop->vtop),
10778 LOOP_BLOCK_NUM (loop->end),
10779 LOOP_INSN_UID (loop->end));
10780 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10781 LOOP_BLOCK_NUM (loop->top),
10782 LOOP_INSN_UID (loop->top),
10783 LOOP_BLOCK_NUM (loop->scan_start),
10784 LOOP_INSN_UID (loop->scan_start));
10785 fprintf (file, ";; exit_count %d", loop->exit_count);
10786 if (loop->exit_count)
10788 fputs (", labels:", file);
10789 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10791 fprintf (file, " %d ",
10792 LOOP_INSN_UID (XEXP (label, 0)));
10795 fputs ("\n", file);
10797 /* This can happen when a marked loop appears as two nested loops,
10798 say from while (a || b) {}. The inner loop won't match
10799 the loop markers but the outer one will. */
10800 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10801 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10805 /* Call this function from the debugger to dump LOOP. */
10807 void
10808 debug_loop (loop)
10809 const struct loop *loop;
10811 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10814 /* Call this function from the debugger to dump LOOPS. */
10816 void
10817 debug_loops (loops)
10818 const struct loops *loops;
10820 flow_loops_dump (loops, stderr, loop_dump_aux, 1);