/* RTL-based forward propagation pass for GNU compiler.
   Copyright (C) 2005, 2006 Free Software Foundation, Inc.
   Contributed by Paolo Bonzini and Steven Bosscher.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"

#include "timevar.h"
#include "rtl.h"
#include "tm_p.h"
#include "emit-rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "flags.h"
#include "obstack.h"
#include "basic-block.h"
#include "output.h"
#include "df.h"
#include "target.h"
#include "cfgloop.h"
#include "tree-pass.h"

/* This pass does simple forward propagation and simplification when an
   operand of an insn can only come from a single def.  This pass uses
   df.c, so it is global.  However, we only do limited analysis of
   available expressions.

   1) The pass tries to propagate the source of the def into the use,
   and checks if the result is independent of the substituted value.
   For example, the high word of a (zero_extend:DI (reg:SI M)) is always
   zero, independent of the source register.

   In particular, we propagate constants into the use site.  Sometimes
   RTL expansion did not put the constant in the same insn on purpose,
   to satisfy a predicate, and the result will fail to be recognized;
   but this happens rarely and in this case we can still create a
   REG_EQUAL note.  For multi-word operations, this

      (set (subreg:SI (reg:DI 120) 0) (const_int 0))
      (set (subreg:SI (reg:DI 120) 4) (const_int -1))
      (set (subreg:SI (reg:DI 122) 0)
           (ior:SI (subreg:SI (reg:DI 119) 0) (subreg:SI (reg:DI 120) 0)))
      (set (subreg:SI (reg:DI 122) 4)
           (ior:SI (subreg:SI (reg:DI 119) 4) (subreg:SI (reg:DI 120) 4)))

   can be simplified to the much simpler

      (set (subreg:SI (reg:DI 122) 0) (subreg:SI (reg:DI 119) 0))
      (set (subreg:SI (reg:DI 122) 4) (const_int -1))

   This particular propagation is also effective at putting together
   complex addressing modes.  We are more aggressive inside MEMs, in
   that all definitions are propagated if the use is in a MEM; if the
   result is a valid memory address we check address_cost to decide
   whether the substitution is worthwhile.

   2) The pass propagates register copies.  This is not as effective as
   the copy propagation done by CSE's canon_reg, which works by walking
   the instruction chain, but it can still help the other transformations.

   We should consider removing this optimization, and instead reorder the
   RTL passes, because GCSE does this transformation too.  With some luck,
   the CSE pass at the end of rest_of_handle_gcse could also go away.

   3) The pass looks for paradoxical subregs that are actually unnecessary.
   Things like this:

      (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
      (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
      (set (reg:SI 122) (plus:SI (subreg:SI (reg:QI 120) 0)
                                 (subreg:SI (reg:QI 121) 0)))

   are very common on machines that can only do word-sized operations.
   For each use of a paradoxical subreg (subreg:WIDER (reg:NARROW N) 0),
   if it has a single def and it is (subreg:NARROW (reg:WIDE M) 0),
   we can replace the paradoxical subreg with simply (reg:WIDE M).  The
   above will simplify this to

      (set (reg:QI 120) (subreg:QI (reg:SI 118) 0))
      (set (reg:QI 121) (subreg:QI (reg:SI 119) 0))
      (set (reg:SI 122) (plus:SI (reg:SI 118) (reg:SI 119)))

   where the first two insns are now dead.  */

static struct loops loops;
static struct df *df;
static int num_changes;

/* Do not try to replace constant addresses or addresses of local and
   argument slots.  These MEM expressions are made only once and inserted
   in many instructions, as well as being used to control symbol table
   output.  It is not safe to clobber them.

   There are some uncommon cases where the address is already in a register
   for some reason, but we cannot take advantage of that because we have
   no easy way to unshare the MEM.  In addition, looking up all stack
   addresses is costly.  */

static bool
can_simplify_addr (rtx addr)
{
  rtx reg;

  if (CONSTANT_ADDRESS_P (addr))
    return false;

  if (GET_CODE (addr) == PLUS)
    reg = XEXP (addr, 0);
  else
    reg = addr;

  return (!REG_P (reg)
          || (REGNO (reg) != FRAME_POINTER_REGNUM
              && REGNO (reg) != HARD_FRAME_POINTER_REGNUM
              && REGNO (reg) != ARG_POINTER_REGNUM));
}

/* Returns a canonical version of X for the address, from the point of
   view that all multiplications are represented as MULT instead of the
   multiply by a power of 2 being represented as ASHIFT.

   Every ASHIFT we find has been made by simplify_gen_binary and was not
   there before, so it is not shared.  So we can do this in place.  */

static void
canonicalize_address (rtx x)
{
  for (;;)
    switch (GET_CODE (x))
      {
      case ASHIFT:
        if (GET_CODE (XEXP (x, 1)) == CONST_INT
            && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))
            && INTVAL (XEXP (x, 1)) >= 0)
          {
            HOST_WIDE_INT shift = INTVAL (XEXP (x, 1));
            PUT_CODE (x, MULT);
            XEXP (x, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift,
                                        GET_MODE (x));
          }

        x = XEXP (x, 0);
        break;

      case PLUS:
        if (GET_CODE (XEXP (x, 0)) == PLUS
            || GET_CODE (XEXP (x, 0)) == ASHIFT
            || GET_CODE (XEXP (x, 0)) == CONST)
          canonicalize_address (XEXP (x, 0));

        x = XEXP (x, 1);
        break;

      case CONST:
        x = XEXP (x, 0);
        break;

      default:
        return;
      }
}
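
/* As a hypothetical illustration (the register numbers are invented),
   canonicalize_address rewrites an address such as

      (plus:SI (ashift:SI (reg:SI 100) (const_int 2)) (reg:SI 101))

   in place into

      (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101))

   so that the address cost checks below only ever see the MULT form.  */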

/* OLD is a memory address.  Return whether it is good to use NEW instead,
   for a memory access in the given MODE.  */

static bool
should_replace_address (rtx old, rtx new, enum machine_mode mode)
{
  int gain;

  if (rtx_equal_p (old, new) || !memory_address_p (mode, new))
    return false;

  /* Copy propagation is always ok.  */
  if (REG_P (old) && REG_P (new))
    return true;

  /* Prefer the new address if it is less expensive.  */
  gain = address_cost (old, mode) - address_cost (new, mode);

  /* If the addresses have equivalent cost, prefer the new address
     if it has the highest `rtx_cost'.  That has the potential of
     eliminating the most insns without additional costs, and it
     is the same that cse.c used to do.  */
  if (gain == 0)
    gain = rtx_cost (new, SET) - rtx_cost (old, SET);

  return (gain > 0);
}
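
/* A sketch of the decision above, with invented operands: replacing the
   address (plus:SI (reg:SI 100) (reg:SI 101)) by
   (plus:SI (reg:SI 100) (const_int 8)) is accepted only if the latter is a
   valid address for MODE and address_cost does not report it as more
   expensive; when the address costs tie, the form with the higher rtx_cost
   wins because it tends to let more feeding insns become dead.  */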

/* Replace all occurrences of OLD in *PX with NEW and try to simplify the
   resulting expression.  Replace *PX with a new RTL expression if an
   occurrence of OLD was found.

   If CAN_APPEAR is true, we always return true; if it is false, we
   can return false if, for at least one occurrence of OLD, we failed to
   collapse the result to a constant.  For example, (mult:M (reg:M A)
   (minus:M (reg:M B) (reg:M A))) may collapse to zero if replacing
   (reg:M B) with (reg:M A).

   CAN_APPEAR is disregarded inside MEMs: in that case, we always return
   true if the simplification is a cheaper and valid memory address.

   This is only a wrapper around simplify-rtx.c: do not add any pattern
   matching code here.  (The sole exception is the handling of LO_SUM, but
   that is because there is no simplify_gen_* function for LO_SUM).  */

static bool
propagate_rtx_1 (rtx *px, rtx old, rtx new, bool can_appear)
{
  rtx x = *px, tem = NULL_RTX, op0, op1, op2;
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  bool valid_ops = true;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression,
     try to build a new expression from recursive substitution.  */

  if (x == old)
    {
      *px = new;
      return can_appear;
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
      if (op0 == XEXP (x, 0))
        return true;
      tem = simplify_gen_unary (code, mode, op0, op_mode);
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
      valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return true;
      tem = simplify_gen_binary (code, mode, op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
      valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return true;
      tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op2 = XEXP (x, 2);
      op_mode = GET_MODE (op0);
      valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
      valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
      valid_ops &= propagate_rtx_1 (&op2, old, new, can_appear);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return true;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      tem = simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
      break;

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = XEXP (x, 0);
          valid_ops &= propagate_rtx_1 (&op0, old, new, can_appear);
          if (op0 == XEXP (x, 0))
            return true;
          tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
        }
      break;

    case RTX_OBJ:
      if (code == MEM && x != new)
        {
          rtx new_op0;
          op0 = XEXP (x, 0);

          /* There are some addresses that we cannot work on.  */
          if (!can_simplify_addr (op0))
            return true;

          op0 = new_op0 = targetm.delegitimize_address (op0);
          valid_ops &= propagate_rtx_1 (&new_op0, old, new, true);

          /* Dismiss transformations that we do not want to carry on.  */
          if (!valid_ops
              || new_op0 == op0
              || GET_MODE (new_op0) != GET_MODE (op0))
            return true;

          canonicalize_address (new_op0);

          /* Copy propagations are always ok.  Otherwise check the costs.  */
          if (!(REG_P (old) && REG_P (new))
              && !should_replace_address (op0, new_op0, GET_MODE (x)))
            return true;

          tem = replace_equiv_address_nv (x, new_op0);
        }

      else if (code == LO_SUM)
        {
          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);

          /* The only simplification we do attempts to remove references
             to op0 or make it constant -- in both cases, op0's invalidity
             will not make the result invalid.  */
          propagate_rtx_1 (&op0, old, new, true);
          valid_ops &= propagate_rtx_1 (&op1, old, new, can_appear);
          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return true;

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            tem = op1;
          else
            tem = gen_rtx_LO_SUM (mode, op0, op1);

          /* OP1 is likely not a legitimate address, otherwise there would
             have been no LO_SUM.  We want it to disappear if it is invalid;
             return false in that case.  */
          return memory_address_p (mode, tem);
        }

      else if (code == REG)
        {
          if (rtx_equal_p (x, old))
            {
              *px = new;
              return can_appear;
            }
        }
      break;

    default:
      break;
    }

  /* No change, no trouble.  */
  if (tem == NULL_RTX)
    return true;

  *px = tem;

  /* The replacement we made so far is valid, if all of the recursive
     replacements were valid, or we could simplify everything to
     a constant.  */
  return valid_ops || can_appear || CONSTANT_P (tem);
}
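
/* A worked example of the CAN_APPEAR logic (register numbers invented):
   substituting (reg:SI 100) for OLD = (reg:SI 101) in

      (mult:SI (reg:SI 100) (minus:SI (reg:SI 101) (reg:SI 100)))

   lets simplify_gen_binary collapse the MINUS to (const_int 0) and then the
   MULT to (const_int 0), so the result is accepted even with CAN_APPEAR
   false.  A substitution that leaves the replacement register visible
   outside a MEM, without collapsing to a constant, is rejected instead.  */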

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting expression (in mode MODE).  Return a new expression if it is
   a constant, otherwise X.

   Simplifications where occurrences of NEW collapse to a constant are always
   accepted.  All simplifications are accepted if NEW is a pseudo too.
   Otherwise, we accept simplifications that have a lower or equal cost.  */

static rtx
propagate_rtx (rtx x, enum machine_mode mode, rtx old, rtx new)
{
  rtx tem;
  bool collapsed;

  if (REG_P (new) && REGNO (new) < FIRST_PSEUDO_REGISTER)
    return NULL_RTX;

  new = copy_rtx (new);

  tem = x;
  collapsed = propagate_rtx_1 (&tem, old, new, REG_P (new) || CONSTANT_P (new));
  if (tem == x || !collapsed)
    return NULL_RTX;

  /* gen_lowpart_common will not be able to process VOIDmode entities other
     than CONST_INTs.  */
  if (GET_MODE (tem) == VOIDmode && GET_CODE (tem) != CONST_INT)
    return NULL_RTX;

  if (GET_MODE (tem) == VOIDmode)
    tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
  else
    gcc_assert (GET_MODE (tem) == mode);

  return tem;
}
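
/* For instance (a made-up example), with the definition
   (set (reg:SI 100) (const_int 4)) feeding the use

      (plus:SI (reg:SI 100) (reg:SI 101))

   propagate_rtx is called with OLD = (reg:SI 100) and NEW = (const_int 4);
   because NEW is a constant, CAN_APPEAR is passed as true and the simplified
   (plus:SI (reg:SI 101) (const_int 4)) is returned even though it did not
   collapse to a constant.  A hard register NEW is rejected up front.  */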

/* Return true if the register from reference REF is killed
   between FROM and (but not including) TO.  */

static bool
local_ref_killed_between_p (struct df_ref *ref, rtx from, rtx to)
{
  rtx insn;
  struct df_ref *def;

  for (insn = from; insn != to; insn = NEXT_INSN (insn))
    {
      if (!INSN_P (insn))
        continue;

      def = DF_INSN_DEFS (df, insn);
      while (def)
        {
          if (DF_REF_REGNO (ref) == DF_REF_REGNO (def))
            return true;
          def = def->next_ref;
        }
    }
  return false;
}

/* Check if the given DEF is available in TARGET_INSN.  This would require
   full computation of available expressions; we check only restricted
   conditions:
   - if DEF is the sole definition of its register, go ahead;
   - in the same basic block, we check for no definitions killing the
     definition of DEF_INSN;
   - if USE's basic block has DEF's basic block as the sole predecessor,
     we check if the definition is killed after DEF_INSN or before
     TARGET_INSN, in their respective basic blocks.  */

static bool
use_killed_between (struct df_ref *use, rtx def_insn, rtx target_insn)
{
  basic_block def_bb = BLOCK_FOR_INSN (def_insn);
  basic_block target_bb = BLOCK_FOR_INSN (target_insn);
  int regno;
  struct df_ref *def;

  /* In some obscure situations we can have a def reaching a use
     that is _before_ the def.  In other words the def does not
     dominate the use even though the use and def are in the same
     basic block.  This can happen when a register may be used
     uninitialized in a loop.  In such cases, we must assume that
     DEF is not available.  */
  if (def_bb == target_bb
      ? DF_INSN_LUID (df, def_insn) >= DF_INSN_LUID (df, target_insn)
      : !dominated_by_p (CDI_DOMINATORS, target_bb, def_bb))
    return true;

  /* Check if the reg in USE has only one definition.  We already
     know that this definition reaches use, or we wouldn't be here.  */
  regno = DF_REF_REGNO (use);
  def = DF_REG_DEF_GET (df, regno)->reg_chain;
  if (def && (def->next_reg == NULL))
    return false;

  /* Check locally if we are in the same basic block.  */
  if (def_bb == target_bb)
    return local_ref_killed_between_p (use, def_insn, target_insn);

  /* Finally, if DEF_BB is the sole predecessor of TARGET_BB.  */
  if (single_pred_p (target_bb)
      && single_pred (target_bb) == def_bb)
    {
      struct df_ref *x;

      /* See if USE is killed between DEF_INSN and the last insn in the
         basic block containing DEF_INSN.  */
      x = df_bb_regno_last_def_find (df, def_bb, regno);
      if (x && DF_INSN_LUID (df, x->insn) >= DF_INSN_LUID (df, def_insn))
        return true;

      /* See if USE is killed between TARGET_INSN and the first insn in the
         basic block containing TARGET_INSN.  */
      x = df_bb_regno_first_def_find (df, target_bb, regno);
      if (x && DF_INSN_LUID (df, x->insn) < DF_INSN_LUID (df, target_insn))
        return true;

      return false;
    }

  /* Otherwise assume the worst case.  */
  return true;
}
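
/* To make the conditions concrete (insns and registers invented): if
   DEF_INSN is (set (reg:SI 200) (plus:SI (reg:SI 100) (const_int 4))) and
   USE is the read of register 100 inside it, propagating the PLUS into
   TARGET_INSN is safe only if register 100 still holds the same value
   there.  The checks above approximate that: a single definition of
   register 100, or no intervening definition within the shared basic
   block, or the single-predecessor case; anything else is assumed
   killed.  */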

/* for_each_rtx traversal function that returns 1 if BODY points to
   a non-constant mem.  */

static int
varying_mem_p (rtx *body, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *body;
  return MEM_P (x) && !MEM_READONLY_P (x);
}

/* Check if all uses in DEF_INSN can be used in TARGET_INSN.  This
   would require full computation of available expressions;
   we check only restricted conditions, see use_killed_between.  */

static bool
all_uses_available_at (rtx def_insn, rtx target_insn)
{
  struct df_ref *use;
  rtx def_set = single_set (def_insn);

  gcc_assert (def_set);

  /* If target_insn comes right after def_insn, which is very common
     for addresses, we can use a quicker test.  */
  if (NEXT_INSN (def_insn) == target_insn
      && REG_P (SET_DEST (def_set)))
    {
      rtx def_reg = SET_DEST (def_set);

      /* If the insn uses the reg that it defines, the substitution is
         invalid.  */
      for (use = DF_INSN_USES (df, def_insn); use; use = use->next_ref)
        if (rtx_equal_p (use->reg, def_reg))
          return false;
    }
  else
    {
      /* Look at all the uses of DEF_INSN, and see if they are not
         killed between DEF_INSN and TARGET_INSN.  */
      for (use = DF_INSN_USES (df, def_insn); use; use = use->next_ref)
        if (use_killed_between (use, def_insn, target_insn))
          return false;
    }

  /* We don't do any analysis of memories or aliasing.  Reject any
     instruction that involves references to non-constant memory.  */
  return !for_each_rtx (&SET_SRC (def_set), varying_mem_p, NULL);
}
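
/* One illustrative case for the quick test above (insns invented): if
   DEF_INSN is (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1))) and
   TARGET_INSN immediately follows it, the definition uses the very register
   it sets, so substituting the PLUS into TARGET_INSN would evaluate it with
   the already-incremented register and yield the wrong value; the function
   therefore returns false.  */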

struct find_occurrence_data
{
  rtx find;
  rtx *retval;
};

/* Callback for for_each_rtx, used in find_occurrence.
   See if PX is the rtx we have to find.  Return 1 to stop for_each_rtx
   if successful, or 0 to continue traversing otherwise.  */

static int
find_occurrence_callback (rtx *px, void *data)
{
  struct find_occurrence_data *fod = (struct find_occurrence_data *) data;
  rtx x = *px;
  rtx find = fod->find;

  if (x == find)
    {
      fod->retval = px;
      return 1;
    }

  return 0;
}

/* Return a pointer to one of the occurrences of register FIND in *PX.  */

static rtx *
find_occurrence (rtx *px, rtx find)
{
  struct find_occurrence_data data;

  gcc_assert (REG_P (find)
              || (GET_CODE (find) == SUBREG
                  && REG_P (SUBREG_REG (find))));

  data.find = find;
  data.retval = NULL;
  for_each_rtx (px, find_occurrence_callback, &data);
  return data.retval;
}

/* Inside INSN, the expression rooted at *LOC has been changed, moving some
   uses from ORIG_USES.  Find those that are present, and create new items
   in the data flow object of the pass.  Mark any new uses as having the
   given TYPE.  */

static void
update_df (rtx insn, rtx *loc, struct df_ref *orig_uses, enum df_ref_type type,
           int new_flags)
{
  struct df_ref *use;

  /* Add a use for the registers that were propagated.  */
  for (use = orig_uses; use; use = use->next_ref)
    {
      struct df_ref *orig_use = use, *new_use;
      rtx *new_loc = find_occurrence (loc, DF_REF_REG (orig_use));

      if (!new_loc)
        continue;

      /* Add a new insn use.  Use the original type, because it says if the
         use was within a MEM.  */
      new_use = df_ref_create (df, DF_REF_REG (orig_use), new_loc,
                               insn, BLOCK_FOR_INSN (insn),
                               type, DF_REF_FLAGS (orig_use) | new_flags);

      /* Set up the use-def chain.  */
      df_chain_copy (df->problems_by_index[DF_CHAIN],
                     new_use, DF_REF_CHAIN (orig_use));
    }
}

/* Try substituting NEW into LOC, which originated from forward propagation
   of USE's value from DEF_INSN.  SET_REG_EQUAL says whether we are
   substituting the whole SET_SRC, so we can set a REG_EQUAL note if the
   new insn is not recognized.  Return whether the substitution was
   performed.  */

static bool
try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn,
                  bool set_reg_equal)
{
  rtx insn = DF_REF_INSN (use);
  enum df_ref_type type = DF_REF_TYPE (use);
  int flags = DF_REF_FLAGS (use);

  if (dump_file)
    {
      fprintf (dump_file, "\nIn insn %d, replacing\n ", INSN_UID (insn));
      print_inline_rtx (dump_file, *loc, 2);
      fprintf (dump_file, "\n with ");
      print_inline_rtx (dump_file, new, 2);
      fprintf (dump_file, "\n");
    }

  if (validate_change (insn, loc, new, false))
    {
      num_changes++;
      if (dump_file)
        fprintf (dump_file, "Changed insn %d\n", INSN_UID (insn));

      /* Unlink the use that we changed.  */
      df_ref_remove (df, use);
      if (!CONSTANT_P (new))
        update_df (insn, loc, DF_INSN_USES (df, def_insn), type, flags);

      return true;
    }
  else
    {
      if (dump_file)
        fprintf (dump_file, "Changes to insn %d not recognized\n",
                 INSN_UID (insn));

      /* We can also record a simplified value in a REG_EQUAL note, making
         a new one if one does not already exist.  */
      if (set_reg_equal)
        {
          if (dump_file)
            fprintf (dump_file, " Setting REG_EQUAL note\n");

          REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, copy_rtx (new),
                                                REG_NOTES (insn));

          if (!CONSTANT_P (new))
            update_df (insn, loc, DF_INSN_USES (df, def_insn),
                       type, DF_REF_IN_NOTE);
        }

      return false;
    }
}

/* If USE is a paradoxical subreg, see if it can be replaced by a pseudo.  */

static bool
forward_propagate_subreg (struct df_ref *use, rtx def_insn, rtx def_set)
{
  rtx use_reg = DF_REF_REG (use);
  rtx use_insn, src;

  /* Only consider paradoxical subregs... */
  enum machine_mode use_mode = GET_MODE (use_reg);
  if (GET_CODE (use_reg) != SUBREG
      || !REG_P (SET_DEST (def_set))
      || GET_MODE_SIZE (use_mode)
         <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (use_reg))))
    return false;

  /* If this is a paradoxical SUBREG, we have no idea what value the
     extra bits would have.  However, if the operand is equivalent to
     a SUBREG whose operand is the same as our mode, and all the modes
     are within a word, we can just use the inner operand because
     these SUBREGs just say how to treat the register.  */
  use_insn = DF_REF_INSN (use);
  src = SET_SRC (def_set);
  if (GET_CODE (src) == SUBREG
      && REG_P (SUBREG_REG (src))
      && GET_MODE (SUBREG_REG (src)) == use_mode
      && subreg_lowpart_p (src)
      && all_uses_available_at (def_insn, use_insn))
    return try_fwprop_subst (use, DF_REF_LOC (use), SUBREG_REG (src),
                             def_insn, false);
  else
    return false;
}

/* Try to replace USE with SRC (defined in DEF_INSN) and simplify the
   result.  */

static bool
forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
{
  rtx use_insn = DF_REF_INSN (use);
  rtx use_set = single_set (use_insn);
  rtx src, reg, new, *loc;
  bool set_reg_equal;
  enum machine_mode mode;

  if (!use_set)
    return false;

  /* Do not propagate into PC, CC0, etc.  */
  if (GET_MODE (SET_DEST (use_set)) == VOIDmode)
    return false;

  /* If def and use are subregs, check if they match.  */
  reg = DF_REF_REG (use);
  if (GET_CODE (reg) == SUBREG
      && GET_CODE (SET_DEST (def_set)) == SUBREG
      && (SUBREG_BYTE (SET_DEST (def_set)) != SUBREG_BYTE (reg)
          || GET_MODE (SET_DEST (def_set)) != GET_MODE (reg)))
    return false;

  /* Check if the def had a subreg, but the use has the whole reg.  */
  if (REG_P (reg) && GET_CODE (SET_DEST (def_set)) == SUBREG)
    return false;

  /* Check if the use has a subreg, but the def had the whole reg.  Unlike
     the previous case, the optimization is possible and often useful.  */
  if (GET_CODE (reg) == SUBREG && REG_P (SET_DEST (def_set)))
    reg = SUBREG_REG (reg);

  /* Check if the substitution is valid (last, because it's the most
     expensive check!).  */
  src = SET_SRC (def_set);
  if (!CONSTANT_P (src) && !all_uses_available_at (def_insn, use_insn))
    return false;

  /* Check if the def is loading something from the constant pool; in this
     case we would undo optimizations such as compress_float_constant.
     Still, we can set a REG_EQUAL note.  */
  if (MEM_P (src) && MEM_READONLY_P (src))
    {
      rtx x = avoid_constant_pool_reference (src);
      if (x != src)
        {
          rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
          rtx old = note ? XEXP (note, 0) : SET_SRC (use_set);
          rtx new = simplify_replace_rtx (old, src, x);
          if (old != new)
            set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new));
        }
      return false;
    }

  /* Else try simplifying.  */

  if (DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE)
    {
      loc = &SET_DEST (use_set);
      set_reg_equal = false;
    }
  else
    {
      rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
      if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
        loc = &XEXP (note, 0);
      else
        loc = &SET_SRC (use_set);

      /* Do not replace an existing REG_EQUAL note if the insn is not
         recognized.  Either we're already replacing in the note, or
         we'll separately try plugging the definition in the note and
         simplifying.  */
      set_reg_equal = (note == NULL_RTX);
    }

  if (GET_MODE (*loc) == VOIDmode)
    mode = GET_MODE (SET_DEST (use_set));
  else
    mode = GET_MODE (*loc);

  new = propagate_rtx (*loc, mode, reg, src);

  if (!new)
    return false;

  return try_fwprop_subst (use, loc, new, def_insn, set_reg_equal);
}

/* Given a use USE of an insn, if it has a single reaching
   definition, try to forward propagate it into that insn.  */

static void
forward_propagate_into (struct df_ref *use)
{
  struct df_link *defs;
  struct df_ref *def;
  rtx def_insn, def_set, use_insn;
  rtx parent;

  if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
    return;

  /* Only consider uses that have a single definition.  */
  defs = DF_REF_CHAIN (use);
  if (!defs || defs->next)
    return;

  def = defs->ref;
  if (DF_REF_FLAGS (def) & DF_REF_READ_WRITE)
    return;

  /* Do not propagate loop invariant definitions inside the loop if
     we are going to unroll.  */
  if (loops.num > 0
      && DF_REF_BB (def)->loop_father != DF_REF_BB (use)->loop_father)
    return;

  /* Check if the use is still present in the insn!  */
  use_insn = DF_REF_INSN (use);
  if (DF_REF_FLAGS (use) & DF_REF_IN_NOTE)
    parent = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
  else
    parent = PATTERN (use_insn);

  if (!loc_mentioned_in_p (DF_REF_LOC (use), parent))
    return;

  def_insn = DF_REF_INSN (def);
  def_set = single_set (def_insn);
  if (!def_set)
    return;

  /* Only try one kind of propagation.  If two are possible, we'll
     do it on the following iterations.  */
  if (!forward_propagate_and_simplify (use, def_insn, def_set))
    forward_propagate_subreg (use, def_insn, def_set);
}

static void
fwprop_init (void)
{
  num_changes = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not always want to propagate into loops, so we have to find
     loops and be careful about them.  But we have to call flow_loops_find
     before df_analyze, because flow_loops_find may introduce new jump
     insns (sadly) if we are not working in cfglayout mode.  */
  if (flag_rerun_cse_after_loop && (flag_unroll_loops || flag_peel_loops))
    flow_loops_find (&loops);

  /* Now set up the dataflow problem (we only want use-def chains) and
     put the dataflow solver to work.  */
  df = df_init (DF_SUBREGS | DF_EQUIV_NOTES);
  df_chain_add_problem (df, DF_UD_CHAIN);
  df_analyze (df);
  df_dump (df, dump_file);
}

static void
fwprop_done (void)
{
  df_finish (df);

  if (flag_rerun_cse_after_loop && (flag_unroll_loops || flag_peel_loops))
    {
      flow_loops_free (&loops);
      loops.num = 0;
    }

  free_dominance_info (CDI_DOMINATORS);
  cleanup_cfg (0);
  delete_trivially_dead_insns (get_insns (), max_reg_num ());

  if (dump_file)
    fprintf (dump_file,
             "\nNumber of successful forward propagations: %d\n\n",
             num_changes);
}

/* Main entry point.  */

static bool
gate_fwprop (void)
{
  return optimize > 0 && flag_forward_propagate;
}

static unsigned int
fwprop (void)
{
  unsigned i;

  fwprop_init ();

  /* Go through all the uses.  update_df will create new ones at the
     end, and we'll go through them as well.

     Do not forward propagate addresses into loops until after unrolling.
     CSE did so because it was able to fix its own mess, but we are not.  */

  df_reorganize_refs (&df->use_info);
  for (i = 0; i < DF_USES_SIZE (df); i++)
    {
      struct df_ref *use = DF_USES_GET (df, i);
      if (use)
        if (loops.num == 0
            || DF_REF_TYPE (use) == DF_REF_REG_USE
            || DF_REF_BB (use)->loop_father == NULL)
          forward_propagate_into (use);
    }

  fwprop_done ();

  return 0;
}

struct tree_opt_pass pass_rtl_fwprop =
{
  "fwprop1",                            /* name */
  gate_fwprop,                          /* gate */
  fwprop,                               /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_FWPROP,                            /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};

static bool
gate_fwprop_addr (void)
{
  return optimize > 0 && flag_forward_propagate && flag_rerun_cse_after_loop
         && (flag_unroll_loops || flag_peel_loops);
}

static unsigned int
fwprop_addr (void)
{
  unsigned i;

  fwprop_init ();

  /* Go through all the uses.  update_df will create new ones at the
     end, and we'll go through them as well.  */
  df_reorganize_refs (&df->use_info);
  for (i = 0; i < DF_USES_SIZE (df); i++)
    {
      struct df_ref *use = DF_USES_GET (df, i);
      if (use)
        if (DF_REF_TYPE (use) != DF_REF_REG_USE
            && DF_REF_BB (use)->loop_father != NULL)
          forward_propagate_into (use);
    }

  fwprop_done ();

  return 0;
}

struct tree_opt_pass pass_rtl_fwprop_addr =
{
  "fwprop2",                            /* name */
  gate_fwprop_addr,                     /* gate */
  fwprop_addr,                          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_FWPROP,                            /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};