1 /* RTL-level loop invariant motion.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This implements the loop invariant motion pass. It is very simple
21 (no calls, no loads/stores, etc.). This should be sufficient to clean up
22 things like address arithmetic -- other more complicated invariants should
23 be eliminated on GIMPLE either in tree-ssa-loop-im.c or in tree-ssa-pre.c.
25 We proceed loop by loop -- it is simpler than trying to handle things
26 globally and should not lose much. First we inspect all sets inside loop
27 and create a dependency graph on insns (saying "to move this insn, you must
28 also move the following insns").
30 We then need to determine what to move. We estimate the number of registers
31 used and move as many invariants as possible while we still have enough free
32 registers. We prefer the expensive invariants.
34 Then we move the selected invariants out of the loop, creating new
35 temporaries for them if necessary. */
39 #include "coretypes.h"
47 #include "insn-config.h"
57 /* The data stored for the loop. */
61 struct loop
*outermost_exit
; /* The outermost exit of the loop. */
62 bool has_call
; /* True if the loop contains a call. */
63 /* Maximal register pressure inside loop for given register class
64 (defined only for the pressure classes). */
65 int max_reg_pressure
[N_REG_CLASSES
];
66 /* Loop regs referenced and live pseudo-registers. */
68 bitmap_head regs_live
;
/* Accessor for the per-loop bookkeeping (struct loop_data) that this pass
   stashes in the loop's generic AUX pointer.  */
71 #define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)
73 /* The description of a use. */
77 rtx
*pos
; /* Position of the use. */
78 rtx_insn
*insn
; /* The insn in that the use occurs. */
79 unsigned addr_use_p
; /* Whether the use occurs in an address. */
80 struct use
*next
; /* Next use in the list. */
83 /* The description of a def. */
87 struct use
*uses
; /* The list of uses that are uniquely reached
89 unsigned n_uses
; /* Number of such uses. */
90 unsigned n_addr_uses
; /* Number of uses in addresses. */
91 unsigned invno
; /* The corresponding invariant. */
92 bool can_prop_to_addr_uses
; /* True if the corresponding inv can be
93 propagated into its address uses. */
96 /* The data stored for each invariant. */
100 /* The number of the invariant. */
103 /* The number of the invariant with the same value. */
106 /* The number of invariants which eqto this. */
109 /* If we moved the invariant out of the loop, the register that contains its
113 /* If we moved the invariant out of the loop, the original regno
114 that contained its value. */
117 /* The definition of the invariant. */
120 /* The insn in that it is defined. */
123 /* Whether it is always executed. */
124 bool always_executed
;
126 /* Whether to move the invariant. */
129 /* Whether the invariant is cheap when used as an address. */
132 /* Cost of the invariant. */
135 /* The invariants it depends on. */
138 /* Used for detecting already visited invariants during determining
139 costs of movements. */
143 /* The loop currently being processed by the invariant-motion pass;
   consulted e.g. when checking register pressure for this loop.  */
144 static struct loop
*curr_loop
;
146 /* Table of invariants indexed by the df_ref uid field.  Grown on demand
   by check_invariant_table_size; a NULL slot means the corresponding
   definition is not known to be invariant.  */
148 static unsigned int invariant_table_size
= 0;   /* Current allocated size of invariant_table, in entries.  */
149 static struct invariant
** invariant_table
;
151 /* Entry for hash table of invariant expressions. */
153 struct invariant_expr_entry
156 struct invariant
*inv
;
168 /* The actual stamp for marking already visited invariants during determining
169 costs of movements.  Compared against each invariant's stamp field so the
   visited marks need not be cleared between passes.  */
171 static unsigned actual_stamp
;
173 typedef struct invariant
*invariant_p
;
176 /* The candidate invariants found in the current loop; an invariant's
   invno field is its index into this vector.  */
178 static vec
<invariant_p
> invariants
;
180 /* Check the size of the invariant table and realloc if necessary. */
183 check_invariant_table_size (void)
185 if (invariant_table_size
< DF_DEFS_TABLE_SIZE ())
187 unsigned int new_size
= DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
188 invariant_table
= XRESIZEVEC (struct invariant
*, invariant_table
, new_size
);
189 memset (&invariant_table
[invariant_table_size
], 0,
190 (new_size
- invariant_table_size
) * sizeof (struct invariant
*));
191 invariant_table_size
= new_size
;
195 /* Test for possibility of invariantness of X. */
198 check_maybe_invariant (rtx x
)
200 enum rtx_code code
= GET_CODE (x
);
214 case UNSPEC_VOLATILE
:
222 /* Load/store motion is done elsewhere. ??? Perhaps also add it here?
223 It should not be hard, and might be faster than "elsewhere". */
225 /* Just handle the most trivial case where we load from an unchanging
226 location (most importantly, pic tables). */
227 if (MEM_READONLY_P (x
) && !MEM_VOLATILE_P (x
))
233 /* Don't mess with insns declared volatile. */
234 if (MEM_VOLATILE_P (x
))
242 fmt
= GET_RTX_FORMAT (code
);
243 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
247 if (!check_maybe_invariant (XEXP (x
, i
)))
250 else if (fmt
[i
] == 'E')
252 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
253 if (!check_maybe_invariant (XVECEXP (x
, i
, j
)))
261 /* Returns the invariant definition for USE, or NULL if USE is not
264 static struct invariant
*
265 invariant_for_use (df_ref use
)
267 struct df_link
*defs
;
269 basic_block bb
= DF_REF_BB (use
), def_bb
;
271 if (DF_REF_FLAGS (use
) & DF_REF_READ_WRITE
)
274 defs
= DF_REF_CHAIN (use
);
275 if (!defs
|| defs
->next
)
278 check_invariant_table_size ();
279 if (!invariant_table
[DF_REF_ID (def
)])
282 def_bb
= DF_REF_BB (def
);
283 if (!dominated_by_p (CDI_DOMINATORS
, bb
, def_bb
))
285 return invariant_table
[DF_REF_ID (def
)];
288 /* Computes hash value for invariant expression X in INSN. */
291 hash_invariant_expr_1 (rtx_insn
*insn
, rtx x
)
293 enum rtx_code code
= GET_CODE (x
);
296 hashval_t val
= code
;
299 struct invariant
*inv
;
307 return hash_rtx (x
, GET_MODE (x
), &do_not_record_p
, NULL
, false);
310 use
= df_find_use (insn
, x
);
312 return hash_rtx (x
, GET_MODE (x
), &do_not_record_p
, NULL
, false);
313 inv
= invariant_for_use (use
);
315 return hash_rtx (x
, GET_MODE (x
), &do_not_record_p
, NULL
, false);
317 gcc_assert (inv
->eqto
!= ~0u);
324 fmt
= GET_RTX_FORMAT (code
);
325 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
328 val
^= hash_invariant_expr_1 (insn
, XEXP (x
, i
));
329 else if (fmt
[i
] == 'E')
331 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
332 val
^= hash_invariant_expr_1 (insn
, XVECEXP (x
, i
, j
));
334 else if (fmt
[i
] == 'i' || fmt
[i
] == 'n')
341 /* Returns true if the invariant expressions E1 and E2 used in insns INSN1
342 and INSN2 have always the same value. */
345 invariant_expr_equal_p (rtx_insn
*insn1
, rtx e1
, rtx_insn
*insn2
, rtx e2
)
347 enum rtx_code code
= GET_CODE (e1
);
351 struct invariant
*inv1
= NULL
, *inv2
= NULL
;
354 /* If mode of only one of the operands is VOIDmode, it is not equivalent to
355 the other one. If both are VOIDmode, we rely on the caller of this
356 function to verify that their modes are the same. */
357 if (code
!= GET_CODE (e2
) || GET_MODE (e1
) != GET_MODE (e2
))
366 return rtx_equal_p (e1
, e2
);
369 use1
= df_find_use (insn1
, e1
);
370 use2
= df_find_use (insn2
, e2
);
372 inv1
= invariant_for_use (use1
);
374 inv2
= invariant_for_use (use2
);
377 return rtx_equal_p (e1
, e2
);
382 gcc_assert (inv1
->eqto
!= ~0u);
383 gcc_assert (inv2
->eqto
!= ~0u);
384 return inv1
->eqto
== inv2
->eqto
;
390 fmt
= GET_RTX_FORMAT (code
);
391 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
398 if (!invariant_expr_equal_p (insn1
, sub1
, insn2
, sub2
))
402 else if (fmt
[i
] == 'E')
404 if (XVECLEN (e1
, i
) != XVECLEN (e2
, i
))
407 for (j
= 0; j
< XVECLEN (e1
, i
); j
++)
409 sub1
= XVECEXP (e1
, i
, j
);
410 sub2
= XVECEXP (e2
, i
, j
);
412 if (!invariant_expr_equal_p (insn1
, sub1
, insn2
, sub2
))
416 else if (fmt
[i
] == 'i' || fmt
[i
] == 'n')
418 if (XINT (e1
, i
) != XINT (e2
, i
))
421 /* Unhandled type of subexpression, we fail conservatively. */
429 struct invariant_expr_hasher
: free_ptr_hash
<invariant_expr_entry
>
431 static inline hashval_t
hash (const invariant_expr_entry
*);
432 static inline bool equal (const invariant_expr_entry
*,
433 const invariant_expr_entry
*);
436 /* Returns hash value for invariant expression entry ENTRY. */
439 invariant_expr_hasher::hash (const invariant_expr_entry
*entry
)
444 /* Compares invariant expression entries ENTRY1 and ENTRY2. */
447 invariant_expr_hasher::equal (const invariant_expr_entry
*entry1
,
448 const invariant_expr_entry
*entry2
)
450 if (entry1
->mode
!= entry2
->mode
)
453 return invariant_expr_equal_p (entry1
->inv
->insn
, entry1
->expr
,
454 entry2
->inv
->insn
, entry2
->expr
);
457 typedef hash_table
<invariant_expr_hasher
> invariant_htab_type
;
459 /* Checks whether invariant with value EXPR in machine mode MODE is
460 recorded in EQ. If this is the case, return the invariant. Otherwise
461 insert INV to the table for this expression and return INV. */
463 static struct invariant
*
464 find_or_insert_inv (invariant_htab_type
*eq
, rtx expr
, machine_mode mode
,
465 struct invariant
*inv
)
467 hashval_t hash
= hash_invariant_expr_1 (inv
->insn
, expr
);
468 struct invariant_expr_entry
*entry
;
469 struct invariant_expr_entry pentry
;
470 invariant_expr_entry
**slot
;
475 slot
= eq
->find_slot_with_hash (&pentry
, hash
, INSERT
);
481 entry
= XNEW (struct invariant_expr_entry
);
491 /* Finds invariants identical to INV and records the equivalence. EQ is the
492 hash table of the invariants. */
495 find_identical_invariants (invariant_htab_type
*eq
, struct invariant
*inv
)
499 struct invariant
*dep
;
502 struct invariant
*tmp
;
504 if (inv
->eqto
!= ~0u)
507 EXECUTE_IF_SET_IN_BITMAP (inv
->depends_on
, 0, depno
, bi
)
509 dep
= invariants
[depno
];
510 find_identical_invariants (eq
, dep
);
513 set
= single_set (inv
->insn
);
514 expr
= SET_SRC (set
);
515 mode
= GET_MODE (expr
);
516 if (mode
== VOIDmode
)
517 mode
= GET_MODE (SET_DEST (set
));
519 tmp
= find_or_insert_inv (eq
, expr
, mode
, inv
);
520 inv
->eqto
= tmp
->invno
;
522 if (tmp
->invno
!= inv
->invno
&& inv
->always_executed
)
525 if (dump_file
&& inv
->eqto
!= inv
->invno
)
527 "Invariant %d is equivalent to invariant %d.\n",
528 inv
->invno
, inv
->eqto
);
531 /* Find invariants with the same value and record the equivalences. */
534 merge_identical_invariants (void)
537 struct invariant
*inv
;
538 invariant_htab_type
eq (invariants
.length ());
540 FOR_EACH_VEC_ELT (invariants
, i
, inv
)
541 find_identical_invariants (&eq
, inv
);
544 /* Determines the basic blocks inside LOOP that are always executed and
545 stores their bitmap to ALWAYS_REACHED. MAY_EXIT is a bitmap of
546 basic blocks that may either exit the loop, or contain the call that
547 does not have to return. BODY is body of the loop obtained by
548 get_loop_body_in_dom_order. */
551 compute_always_reached (struct loop
*loop
, basic_block
*body
,
552 bitmap may_exit
, bitmap always_reached
)
556 for (i
= 0; i
< loop
->num_nodes
; i
++)
558 if (dominated_by_p (CDI_DOMINATORS
, loop
->latch
, body
[i
]))
559 bitmap_set_bit (always_reached
, i
);
561 if (bitmap_bit_p (may_exit
, i
))
566 /* Finds exits out of the LOOP with body BODY. Marks blocks in that we may
567 exit the loop by cfg edge to HAS_EXIT and MAY_EXIT. In MAY_EXIT
568 additionally mark blocks that may exit due to a call. */
571 find_exits (struct loop
*loop
, basic_block
*body
,
572 bitmap may_exit
, bitmap has_exit
)
577 struct loop
*outermost_exit
= loop
, *aexit
;
578 bool has_call
= false;
581 for (i
= 0; i
< loop
->num_nodes
; i
++)
583 if (body
[i
]->loop_father
== loop
)
585 FOR_BB_INSNS (body
[i
], insn
)
588 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn
)
589 || !RTL_CONST_OR_PURE_CALL_P (insn
)))
592 bitmap_set_bit (may_exit
, i
);
597 FOR_EACH_EDGE (e
, ei
, body
[i
]->succs
)
599 if (flow_bb_inside_loop_p (loop
, e
->dest
))
602 bitmap_set_bit (may_exit
, i
);
603 bitmap_set_bit (has_exit
, i
);
604 outermost_exit
= find_common_loop (outermost_exit
,
605 e
->dest
->loop_father
);
610 /* Use the data stored for the subloop to decide whether we may exit
611 through it. It is sufficient to do this for header of the loop,
612 as other basic blocks inside it must be dominated by it. */
613 if (body
[i
]->loop_father
->header
!= body
[i
])
616 if (LOOP_DATA (body
[i
]->loop_father
)->has_call
)
619 bitmap_set_bit (may_exit
, i
);
621 aexit
= LOOP_DATA (body
[i
]->loop_father
)->outermost_exit
;
624 bitmap_set_bit (may_exit
, i
);
625 bitmap_set_bit (has_exit
, i
);
627 if (flow_loop_nested_p (aexit
, outermost_exit
))
628 outermost_exit
= aexit
;
632 if (loop
->aux
== NULL
)
634 loop
->aux
= xcalloc (1, sizeof (struct loop_data
));
635 bitmap_initialize (&LOOP_DATA (loop
)->regs_ref
, ®_obstack
);
636 bitmap_initialize (&LOOP_DATA (loop
)->regs_live
, ®_obstack
);
638 LOOP_DATA (loop
)->outermost_exit
= outermost_exit
;
639 LOOP_DATA (loop
)->has_call
= has_call
;
642 /* Check whether we may assign a value to X from a register. */
645 may_assign_reg_p (rtx x
)
647 return (GET_MODE (x
) != VOIDmode
648 && GET_MODE (x
) != BLKmode
649 && can_copy_p (GET_MODE (x
))
651 || !HARD_REGISTER_P (x
)
652 || REGNO_REG_CLASS (REGNO (x
)) != NO_REGS
));
655 /* Finds definitions that may correspond to invariants in LOOP with body
659 find_defs (struct loop
*loop
)
664 "*****starting processing of loop %d ******\n",
668 df_remove_problem (df_chain
);
669 df_process_deferred_rescans ();
670 df_chain_add_problem (DF_UD_CHAIN
);
671 df_live_add_problem ();
672 df_live_set_all_dirty ();
673 df_set_flags (DF_RD_PRUNE_DEAD_DEFS
);
674 df_analyze_loop (loop
);
675 check_invariant_table_size ();
679 df_dump_region (dump_file
);
681 "*****ending processing of loop %d ******\n",
686 /* Creates a new invariant for definition DEF in INSN, depending on invariants
687 in DEPENDS_ON. ALWAYS_EXECUTED is true if the insn is always executed,
688 unless the program ends due to a function call. The newly created invariant
691 static struct invariant
*
692 create_new_invariant (struct def
*def
, rtx_insn
*insn
, bitmap depends_on
,
693 bool always_executed
)
695 struct invariant
*inv
= XNEW (struct invariant
);
696 rtx set
= single_set (insn
);
697 bool speed
= optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn
));
700 inv
->always_executed
= always_executed
;
701 inv
->depends_on
= depends_on
;
703 /* If the set is simple, usually by moving it we move the whole store out of
704 the loop. Otherwise we save only cost of the computation. */
707 inv
->cost
= set_rtx_cost (set
, speed
);
708 /* ??? Try to determine cheapness of address computation. Unfortunately
709 the address cost is only a relative measure, we can't really compare
710 it with any absolute number, but only with other address costs.
711 But here we don't have any other addresses, so compare with a magic
712 number anyway. It has to be large enough to not regress PR33928
713 (by avoiding to move reg+8,reg+16,reg+24 invariants), but small
714 enough to not regress 410.bwaves either (by still moving reg+reg
716 See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html . */
717 if (SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set
))))
718 inv
->cheap_address
= address_cost (SET_SRC (set
), word_mode
,
719 ADDR_SPACE_GENERIC
, speed
) < 3;
721 inv
->cheap_address
= false;
725 inv
->cost
= set_src_cost (SET_SRC (set
), GET_MODE (SET_DEST (set
)),
727 inv
->cheap_address
= false;
732 inv
->orig_regno
= -1;
736 inv
->invno
= invariants
.length ();
743 def
->invno
= inv
->invno
;
744 invariants
.safe_push (inv
);
749 "Set in insn %d is invariant (%d), cost %d, depends on ",
750 INSN_UID (insn
), inv
->invno
, inv
->cost
);
751 dump_bitmap (dump_file
, inv
->depends_on
);
757 /* Given invariant DEF and its address USE, check if the corresponding
758 invariant expr can be propagated into the use or not. */
761 inv_can_prop_to_addr_use (struct def
*def
, df_ref use
)
763 struct invariant
*inv
;
764 rtx
*pos
= DF_REF_REAL_LOC (use
), def_set
;
765 rtx_insn
*use_insn
= DF_REF_INSN (use
);
769 inv
= invariants
[def
->invno
];
770 /* No need to check if address expression is expensive. */
771 if (!inv
->cheap_address
)
774 def_insn
= inv
->insn
;
775 def_set
= single_set (def_insn
);
779 validate_unshare_change (use_insn
, pos
, SET_SRC (def_set
), true);
780 ok
= verify_changes (0);
785 /* Record USE at DEF. */
788 record_use (struct def
*def
, df_ref use
)
790 struct use
*u
= XNEW (struct use
);
792 u
->pos
= DF_REF_REAL_LOC (use
);
793 u
->insn
= DF_REF_INSN (use
);
794 u
->addr_use_p
= (DF_REF_TYPE (use
) == DF_REF_REG_MEM_LOAD
795 || DF_REF_TYPE (use
) == DF_REF_REG_MEM_STORE
);
801 /* Initialize propagation information if this is the first addr
802 use of the inv def. */
803 if (def
->n_addr_uses
== 0)
804 def
->can_prop_to_addr_uses
= true;
807 if (def
->can_prop_to_addr_uses
&& !inv_can_prop_to_addr_use (def
, use
))
808 def
->can_prop_to_addr_uses
= false;
812 /* Finds the invariants USE depends on and store them to the DEPENDS_ON
813 bitmap. Returns true if all dependencies of USE are known to be
814 loop invariants, false otherwise. */
817 check_dependency (basic_block bb
, df_ref use
, bitmap depends_on
)
821 struct df_link
*defs
;
822 struct def
*def_data
;
823 struct invariant
*inv
;
825 if (DF_REF_FLAGS (use
) & DF_REF_READ_WRITE
)
828 defs
= DF_REF_CHAIN (use
);
831 unsigned int regno
= DF_REF_REGNO (use
);
833 /* If this is the use of an uninitialized argument register that is
834 likely to be spilled, do not move it lest this might extend its
835 lifetime and cause reload to die. This can occur for a call to
836 a function taking complex number arguments and moving the insns
837 preparing the arguments without moving the call itself wouldn't
838 gain much in practice. */
839 if ((DF_REF_FLAGS (use
) & DF_HARD_REG_LIVE
)
840 && FUNCTION_ARG_REGNO_P (regno
)
841 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (regno
)))
851 check_invariant_table_size ();
852 inv
= invariant_table
[DF_REF_ID (def
)];
857 gcc_assert (def_data
!= NULL
);
859 def_bb
= DF_REF_BB (def
);
860 /* Note that in case bb == def_bb, we know that the definition
861 dominates insn, because def has invariant_table[DF_REF_ID(def)]
862 defined and we process the insns in the basic block bb
864 if (!dominated_by_p (CDI_DOMINATORS
, bb
, def_bb
))
867 bitmap_set_bit (depends_on
, def_data
->invno
);
872 /* Finds the invariants INSN depends on and store them to the DEPENDS_ON
873 bitmap. Returns true if all dependencies of INSN are known to be
874 loop invariants, false otherwise. */
877 check_dependencies (rtx_insn
*insn
, bitmap depends_on
)
879 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
881 basic_block bb
= BLOCK_FOR_INSN (insn
);
883 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
884 if (!check_dependency (bb
, use
, depends_on
))
886 FOR_EACH_INSN_INFO_EQ_USE (use
, insn_info
)
887 if (!check_dependency (bb
, use
, depends_on
))
893 /* Pre-check candidate DEST to skip the one which can not make a valid insn
894 during move_invariant_reg. SIMPLE is to skip HARD_REGISTER. */
896 pre_check_invariant_p (bool simple
, rtx dest
)
898 if (simple
&& REG_P (dest
) && DF_REG_DEF_COUNT (REGNO (dest
)) > 1)
901 unsigned int i
= REGNO (dest
);
902 struct df_insn_info
*insn_info
;
905 for (use
= DF_REG_USE_CHAIN (i
); use
; use
= DF_REF_NEXT_REG (use
))
907 rtx_insn
*ref
= DF_REF_INSN (use
);
908 insn_info
= DF_INSN_INFO_GET (ref
);
910 FOR_EACH_INSN_INFO_DEF (def_rec
, insn_info
)
911 if (DF_REF_REGNO (def_rec
) == i
)
913 /* Multiple definitions at this stage are most likely due to
914 instruction constraints, which require both read and write
915 on the same register. Since move_invariant_reg is not
916 powerful enough to handle such cases, just ignore the INV
917 and leave the chance to others. */
925 /* Finds invariant in INSN. ALWAYS_REACHED is true if the insn is always
926 executed. ALWAYS_EXECUTED is true if the insn is always executed,
927 unless the program ends due to a function call. */
930 find_invariant_insn (rtx_insn
*insn
, bool always_reached
, bool always_executed
)
937 struct invariant
*inv
;
939 /* We can't move a CC0 setter without the user. */
940 if (HAVE_cc0
&& sets_cc0_p (insn
))
943 set
= single_set (insn
);
946 dest
= SET_DEST (set
);
949 || HARD_REGISTER_P (dest
))
952 if (!may_assign_reg_p (dest
)
953 || !pre_check_invariant_p (simple
, dest
)
954 || !check_maybe_invariant (SET_SRC (set
)))
957 /* If the insn can throw exception, we cannot move it at all without changing
959 if (can_throw_internal (insn
))
962 /* We cannot make trapping insn executed, unless it was executed before. */
963 if (may_trap_or_fault_p (PATTERN (insn
)) && !always_reached
)
966 depends_on
= BITMAP_ALLOC (NULL
);
967 if (!check_dependencies (insn
, depends_on
))
969 BITMAP_FREE (depends_on
);
974 def
= XCNEW (struct def
);
978 inv
= create_new_invariant (def
, insn
, depends_on
, always_executed
);
982 ref
= df_find_def (insn
, dest
);
983 check_invariant_table_size ();
984 invariant_table
[DF_REF_ID (ref
)] = inv
;
988 /* Record registers used in INSN that have a unique invariant definition. */
991 record_uses (rtx_insn
*insn
)
993 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
995 struct invariant
*inv
;
997 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
999 inv
= invariant_for_use (use
);
1001 record_use (inv
->def
, use
);
1003 FOR_EACH_INSN_INFO_EQ_USE (use
, insn_info
)
1005 inv
= invariant_for_use (use
);
1007 record_use (inv
->def
, use
);
1011 /* Finds invariants in INSN. ALWAYS_REACHED is true if the insn is always
1012 executed. ALWAYS_EXECUTED is true if the insn is always executed,
1013 unless the program ends due to a function call. */
1016 find_invariants_insn (rtx_insn
*insn
, bool always_reached
, bool always_executed
)
1018 find_invariant_insn (insn
, always_reached
, always_executed
);
1022 /* Finds invariants in basic block BB. ALWAYS_REACHED is true if the
1023 basic block is always executed. ALWAYS_EXECUTED is true if the basic
1024 block is always executed, unless the program ends due to a function
1028 find_invariants_bb (basic_block bb
, bool always_reached
, bool always_executed
)
1032 FOR_BB_INSNS (bb
, insn
)
1034 if (!NONDEBUG_INSN_P (insn
))
1037 find_invariants_insn (insn
, always_reached
, always_executed
);
1041 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn
)
1042 || ! RTL_CONST_OR_PURE_CALL_P (insn
)))
1043 always_reached
= false;
1047 /* Finds invariants in LOOP with body BODY. ALWAYS_REACHED is the bitmap of
1048 basic blocks in BODY that are always executed. ALWAYS_EXECUTED is the
1049 bitmap of basic blocks in BODY that are always executed unless the program
1050 ends due to a function call. */
1053 find_invariants_body (struct loop
*loop
, basic_block
*body
,
1054 bitmap always_reached
, bitmap always_executed
)
1058 for (i
= 0; i
< loop
->num_nodes
; i
++)
1059 find_invariants_bb (body
[i
],
1060 bitmap_bit_p (always_reached
, i
),
1061 bitmap_bit_p (always_executed
, i
));
1064 /* Finds invariants in LOOP. */
1067 find_invariants (struct loop
*loop
)
1069 bitmap may_exit
= BITMAP_ALLOC (NULL
);
1070 bitmap always_reached
= BITMAP_ALLOC (NULL
);
1071 bitmap has_exit
= BITMAP_ALLOC (NULL
);
1072 bitmap always_executed
= BITMAP_ALLOC (NULL
);
1073 basic_block
*body
= get_loop_body_in_dom_order (loop
);
1075 find_exits (loop
, body
, may_exit
, has_exit
);
1076 compute_always_reached (loop
, body
, may_exit
, always_reached
);
1077 compute_always_reached (loop
, body
, has_exit
, always_executed
);
1080 find_invariants_body (loop
, body
, always_reached
, always_executed
);
1081 merge_identical_invariants ();
1083 BITMAP_FREE (always_reached
);
1084 BITMAP_FREE (always_executed
);
1085 BITMAP_FREE (may_exit
);
1086 BITMAP_FREE (has_exit
);
1090 /* Frees a list of uses USE. */
1093 free_use_list (struct use
*use
)
1097 for (; use
; use
= next
)
1104 /* Return pressure class and number of hard registers (through *NREGS)
1105 for destination of INSN. */
1106 static enum reg_class
1107 get_pressure_class_and_nregs (rtx_insn
*insn
, int *nregs
)
1110 enum reg_class pressure_class
;
1111 rtx set
= single_set (insn
);
1113 /* Considered invariant insns have only one set. */
1114 gcc_assert (set
!= NULL_RTX
);
1115 reg
= SET_DEST (set
);
1116 if (GET_CODE (reg
) == SUBREG
)
1117 reg
= SUBREG_REG (reg
);
1121 pressure_class
= NO_REGS
;
1127 if (reg
== NULL_RTX
)
1128 pressure_class
= GENERAL_REGS
;
1131 pressure_class
= reg_allocno_class (REGNO (reg
));
1132 pressure_class
= ira_pressure_class_translate
[pressure_class
];
1135 = ira_reg_class_max_nregs
[pressure_class
][GET_MODE (SET_SRC (set
))];
1137 return pressure_class
;
1140 /* Calculates cost and number of registers needed for moving invariant INV
1141 out of the loop and stores them to *COST and *REGS_NEEDED. *CL will be
1142 the REG_CLASS of INV. Return
1143 -1: if INV is invalid.
1144 0: if INV and its depends_on have same reg_class
1145 1: if INV and its depends_on have different reg_classes. */
1148 get_inv_cost (struct invariant
*inv
, int *comp_cost
, unsigned *regs_needed
,
1152 unsigned aregs_needed
[N_REG_CLASSES
];
1154 struct invariant
*dep
;
1158 /* Find the representative of the class of the equivalent invariants. */
1159 inv
= invariants
[inv
->eqto
];
1162 if (! flag_ira_loop_pressure
)
1166 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1167 regs_needed
[ira_pressure_classes
[i
]] = 0;
1171 || inv
->stamp
== actual_stamp
)
1173 inv
->stamp
= actual_stamp
;
1175 if (! flag_ira_loop_pressure
)
1180 enum reg_class pressure_class
;
1182 pressure_class
= get_pressure_class_and_nregs (inv
->insn
, &nregs
);
1183 regs_needed
[pressure_class
] += nregs
;
1184 *cl
= pressure_class
;
1188 if (!inv
->cheap_address
1189 || inv
->def
->n_uses
== 0
1190 || inv
->def
->n_addr_uses
< inv
->def
->n_uses
1191 /* Count cost if the inv can't be propagated into address uses. */
1192 || !inv
->def
->can_prop_to_addr_uses
)
1193 (*comp_cost
) += inv
->cost
* inv
->eqno
;
1197 /* Hoisting constant pool constants into stack regs may cost more than
1198 just single register. On x87, the balance is affected both by the
1199 small number of FP registers, and by its register stack organization,
1200 that forces us to add compensation code in and around the loop to
1201 shuffle the operands to the top of stack before use, and pop them
1202 from the stack after the loop finishes.
1204 To model this effect, we increase the number of registers needed for
1205 stack registers by two: one register push, and one register pop.
1206 This usually has the effect that FP constant loads from the constant
1207 pool are not moved out of the loop.
1209 Note that this also means that dependent invariants can not be moved.
1210 However, the primary purpose of this pass is to move loop invariant
1211 address arithmetic out of loops, and address arithmetic that depends
1212 on floating point constants is unlikely to ever occur. */
1213 rtx set
= single_set (inv
->insn
);
1215 && IS_STACK_MODE (GET_MODE (SET_SRC (set
)))
1216 && constant_pool_constant_p (SET_SRC (set
)))
1218 if (flag_ira_loop_pressure
)
1219 regs_needed
[ira_stack_reg_pressure_class
] += 2;
1221 regs_needed
[0] += 2;
1226 EXECUTE_IF_SET_IN_BITMAP (inv
->depends_on
, 0, depno
, bi
)
1229 enum reg_class dep_cl
= ALL_REGS
;
1232 dep
= invariants
[depno
];
1234 /* If DEP is moved out of the loop, it is not a depends_on any more. */
1238 dep_ret
= get_inv_cost (dep
, &acomp_cost
, aregs_needed
, &dep_cl
);
1240 if (! flag_ira_loop_pressure
)
1241 check_p
= aregs_needed
[0] != 0;
1244 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1245 if (aregs_needed
[ira_pressure_classes
[i
]] != 0)
1247 check_p
= i
< ira_pressure_classes_num
;
1249 if ((dep_ret
== 1) || ((dep_ret
== 0) && (*cl
!= dep_cl
)))
1256 /* We need to check always_executed, since if the original value of
1257 the invariant may be preserved, we may need to keep it in a
1258 separate register. TODO check whether the register has an
1259 use outside of the loop. */
1260 && dep
->always_executed
1261 && !dep
->def
->uses
->next
)
1263 /* If this is a single use, after moving the dependency we will not
1264 need a new register. */
1265 if (! flag_ira_loop_pressure
)
1270 enum reg_class pressure_class
;
1272 pressure_class
= get_pressure_class_and_nregs (inv
->insn
, &nregs
);
1273 aregs_needed
[pressure_class
] -= nregs
;
1277 if (! flag_ira_loop_pressure
)
1278 regs_needed
[0] += aregs_needed
[0];
1281 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1282 regs_needed
[ira_pressure_classes
[i
]]
1283 += aregs_needed
[ira_pressure_classes
[i
]];
1285 (*comp_cost
) += acomp_cost
;
1290 /* Calculates gain for eliminating invariant INV. REGS_USED is the number
1291 of registers used in the loop, NEW_REGS is the number of new variables
1292 already added due to the invariant motion. The number of registers needed
1293 for it is stored in *REGS_NEEDED. SPEED and CALL_P are flags passed
1294 through to estimate_reg_pressure_cost. */
1297 gain_for_invariant (struct invariant
*inv
, unsigned *regs_needed
,
1298 unsigned *new_regs
, unsigned regs_used
,
1299 bool speed
, bool call_p
)
1301 int comp_cost
, size_cost
;
1302 /* Workaround -Wmaybe-uninitialized false positive during
1303 profiledbootstrap by initializing it. */
1304 enum reg_class cl
= NO_REGS
;
1309 ret
= get_inv_cost (inv
, &comp_cost
, regs_needed
, &cl
);
1311 if (! flag_ira_loop_pressure
)
1313 size_cost
= (estimate_reg_pressure_cost (new_regs
[0] + regs_needed
[0],
1314 regs_used
, speed
, call_p
)
1315 - estimate_reg_pressure_cost (new_regs
[0],
1316 regs_used
, speed
, call_p
));
1320 else if ((ret
== 0) && (cl
== NO_REGS
))
1321 /* Hoist it anyway since it does not impact register pressure. */
1326 enum reg_class pressure_class
;
1328 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1330 pressure_class
= ira_pressure_classes
[i
];
1332 if (!reg_classes_intersect_p (pressure_class
, cl
))
1335 if ((int) new_regs
[pressure_class
]
1336 + (int) regs_needed
[pressure_class
]
1337 + LOOP_DATA (curr_loop
)->max_reg_pressure
[pressure_class
]
1338 + IRA_LOOP_RESERVED_REGS
1339 > ira_class_hard_regs_num
[pressure_class
])
1342 if (i
< ira_pressure_classes_num
)
1343 /* There will be register pressure excess and we want not to
1344 make this loop invariant motion. All loop invariants with
1345 non-positive gains will be rejected in function
1346 find_invariants_to_move. Therefore we return the negative
1349 One could think that this rejects also expensive loop
1350 invariant motions and this will hurt code performance.
1351 However numerous experiments with different heuristics
1352 taking invariant cost into account did not confirm this
1353 assumption. There are possible explanations for this
1355 o probably all expensive invariants were already moved out
1356 of the loop by PRE and gimple invariant motion pass.
1357 o expensive invariant execution will be hidden by insn
1358 scheduling or OOO processor hardware because usually such
1359 invariants have a lot of freedom to be executed
1361 Another reason for ignoring invariant cost vs spilling cost
1362 heuristics is also in difficulties to evaluate accurately
1363 spill cost at this stage. */
1369 return comp_cost
- size_cost
;
1372 /* Finds invariant with best gain for moving. Returns the gain, stores
1373 the invariant in *BEST and number of registers needed for it to
1374 *REGS_NEEDED. REGS_USED is the number of registers used in the loop.
1375 NEW_REGS is the number of new variables already added due to invariant
1379 best_gain_for_invariant (struct invariant
**best
, unsigned *regs_needed
,
1380 unsigned *new_regs
, unsigned regs_used
,
1381 bool speed
, bool call_p
)
1383 struct invariant
*inv
;
1384 int i
, gain
= 0, again
;
1385 unsigned aregs_needed
[N_REG_CLASSES
], invno
;
1387 FOR_EACH_VEC_ELT (invariants
, invno
, inv
)
1392 /* Only consider the "representatives" of equivalent invariants. */
1393 if (inv
->eqto
!= inv
->invno
)
1396 again
= gain_for_invariant (inv
, aregs_needed
, new_regs
, regs_used
,
1402 if (! flag_ira_loop_pressure
)
1403 regs_needed
[0] = aregs_needed
[0];
1406 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
1407 regs_needed
[ira_pressure_classes
[i
]]
1408 = aregs_needed
[ira_pressure_classes
[i
]];
1416 /* Marks invariant INVNO and all its dependencies for moving. */
1419 set_move_mark (unsigned invno
, int gain
)
1421 struct invariant
*inv
= invariants
[invno
];
1424 /* Find the representative of the class of the equivalent invariants. */
1425 inv
= invariants
[inv
->eqto
];
1434 fprintf (dump_file
, "Decided to move invariant %d -- gain %d\n",
1437 fprintf (dump_file
, "Decided to move dependent invariant %d\n",
1441 EXECUTE_IF_SET_IN_BITMAP (inv
->depends_on
, 0, invno
, bi
)
1443 set_move_mark (invno
, -1);
1447 /* Determines which invariants to move. */
1450 find_invariants_to_move (bool speed
, bool call_p
)
1453 unsigned i
, regs_used
, regs_needed
[N_REG_CLASSES
], new_regs
[N_REG_CLASSES
];
1454 struct invariant
*inv
= NULL
;
1456 if (!invariants
.length ())
1459 if (flag_ira_loop_pressure
)
1460 /* REGS_USED is actually never used when the flag is on. */
1463 /* We do not really do a good job in estimating number of
1464 registers used; we put some initial bound here to stand for
1465 induction variables etc. that we do not detect. */
1467 unsigned int n_regs
= DF_REG_SIZE (df
);
1471 for (i
= 0; i
< n_regs
; i
++)
1473 if (!DF_REGNO_FIRST_DEF (i
) && DF_REGNO_LAST_USE (i
))
1475 /* This is a value that is used but not changed inside loop. */
1481 if (! flag_ira_loop_pressure
)
1482 new_regs
[0] = regs_needed
[0] = 0;
1485 for (i
= 0; (int) i
< ira_pressure_classes_num
; i
++)
1486 new_regs
[ira_pressure_classes
[i
]] = 0;
1488 while ((gain
= best_gain_for_invariant (&inv
, regs_needed
,
1489 new_regs
, regs_used
,
1490 speed
, call_p
)) > 0)
1492 set_move_mark (inv
->invno
, gain
);
1493 if (! flag_ira_loop_pressure
)
1494 new_regs
[0] += regs_needed
[0];
1497 for (i
= 0; (int) i
< ira_pressure_classes_num
; i
++)
1498 new_regs
[ira_pressure_classes
[i
]]
1499 += regs_needed
[ira_pressure_classes
[i
]];
1504 /* Replace the uses, reached by the definition of invariant INV, by REG.
1506 IN_GROUP is nonzero if this is part of a group of changes that must be
1507 performed as a group. In that case, the changes will be stored. The
1508 function `apply_change_group' will validate and apply the changes. */
1511 replace_uses (struct invariant
*inv
, rtx reg
, bool in_group
)
1513 /* Replace the uses we know to be dominated. It saves work for copy
1514 propagation, and also it is necessary so that dependent invariants
1515 are computed right. */
1519 for (use
= inv
->def
->uses
; use
; use
= use
->next
)
1520 validate_change (use
->insn
, use
->pos
, reg
, true);
1522 /* If we aren't part of a larger group, apply the changes now. */
1524 return apply_change_group ();
1530 /* Whether invariant INV setting REG can be moved out of LOOP, at the end of
1531 the block preceding its header. */
1534 can_move_invariant_reg (struct loop
*loop
, struct invariant
*inv
, rtx reg
)
1537 unsigned int dest_regno
, defs_in_loop_count
= 0;
1538 rtx_insn
*insn
= inv
->insn
;
1539 basic_block bb
= BLOCK_FOR_INSN (inv
->insn
);
1541 /* We ignore hard register and memory access for cost and complexity reasons.
1542 Hard register are few at this stage and expensive to consider as they
1543 require building a separate data flow. Memory access would require using
1544 df_simulate_* and can_move_insns_across functions and is more complex. */
1545 if (!REG_P (reg
) || HARD_REGISTER_P (reg
))
1548 /* Check whether the set is always executed. We could omit this condition if
1549 we know that the register is unused outside of the loop, but it does not
1550 seem worth finding out. */
1551 if (!inv
->always_executed
)
1554 /* Check that all uses that would be dominated by def are already dominated
1556 dest_regno
= REGNO (reg
);
1557 for (use
= DF_REG_USE_CHAIN (dest_regno
); use
; use
= DF_REF_NEXT_REG (use
))
1562 use_insn
= DF_REF_INSN (use
);
1563 use_bb
= BLOCK_FOR_INSN (use_insn
);
1565 /* Ignore instruction considered for moving. */
1566 if (use_insn
== insn
)
1569 /* Don't consider uses outside loop. */
1570 if (!flow_bb_inside_loop_p (loop
, use_bb
))
1573 /* Don't move if a use is not dominated by def in insn. */
1574 if (use_bb
== bb
&& DF_INSN_LUID (insn
) >= DF_INSN_LUID (use_insn
))
1576 if (!dominated_by_p (CDI_DOMINATORS
, use_bb
, bb
))
1580 /* Check for other defs. Any other def in the loop might reach a use
1581 currently reached by the def in insn. */
1582 for (def
= DF_REG_DEF_CHAIN (dest_regno
); def
; def
= DF_REF_NEXT_REG (def
))
1584 basic_block def_bb
= DF_REF_BB (def
);
1586 /* Defs in exit block cannot reach a use they weren't already. */
1587 if (single_succ_p (def_bb
))
1589 basic_block def_bb_succ
;
1591 def_bb_succ
= single_succ (def_bb
);
1592 if (!flow_bb_inside_loop_p (loop
, def_bb_succ
))
1596 if (++defs_in_loop_count
> 1)
1603 /* Move invariant INVNO out of the LOOP. Returns true if this succeeds, false
1607 move_invariant_reg (struct loop
*loop
, unsigned invno
)
1609 struct invariant
*inv
= invariants
[invno
];
1610 struct invariant
*repr
= invariants
[inv
->eqto
];
1612 basic_block preheader
= loop_preheader_edge (loop
)->src
;
1613 rtx reg
, set
, dest
, note
;
1622 /* If this is a representative of the class of equivalent invariants,
1623 really move the invariant. Otherwise just replace its use with
1624 the register used for the representative. */
1627 if (inv
->depends_on
)
1629 EXECUTE_IF_SET_IN_BITMAP (inv
->depends_on
, 0, i
, bi
)
1631 if (!move_invariant_reg (loop
, i
))
1636 /* If possible, just move the set out of the loop. Otherwise, we
1637 need to create a temporary register. */
1638 set
= single_set (inv
->insn
);
1639 reg
= dest
= SET_DEST (set
);
1640 if (GET_CODE (reg
) == SUBREG
)
1641 reg
= SUBREG_REG (reg
);
1643 regno
= REGNO (reg
);
1645 if (!can_move_invariant_reg (loop
, inv
, dest
))
1647 reg
= gen_reg_rtx_and_attrs (dest
);
1649 /* Try replacing the destination by a new pseudoregister. */
1650 validate_change (inv
->insn
, &SET_DEST (set
), reg
, true);
1652 /* As well as all the dominated uses. */
1653 replace_uses (inv
, reg
, true);
1655 /* And validate all the changes. */
1656 if (!apply_change_group ())
1659 emit_insn_after (gen_move_insn (dest
, reg
), inv
->insn
);
1662 fprintf (dump_file
, "Invariant %d moved without introducing a new "
1663 "temporary register\n", invno
);
1664 reorder_insns (inv
->insn
, inv
->insn
, BB_END (preheader
));
1665 df_recompute_luids (preheader
);
1667 /* If there is a REG_EQUAL note on the insn we just moved, and the
1668 insn is in a basic block that is not always executed or the note
1669 contains something for which we don't know the invariant status,
1670 the note may no longer be valid after we move the insn. Note that
1671 uses in REG_EQUAL notes are taken into account in the computation
1672 of invariants, so it is safe to retain the note even if it contains
1673 register references for which we know the invariant status. */
1674 if ((note
= find_reg_note (inv
->insn
, REG_EQUAL
, NULL_RTX
))
1675 && (!inv
->always_executed
1676 || !check_maybe_invariant (XEXP (note
, 0))))
1677 remove_note (inv
->insn
, note
);
1681 if (!move_invariant_reg (loop
, repr
->invno
))
1684 regno
= repr
->orig_regno
;
1685 if (!replace_uses (inv
, reg
, false))
1687 set
= single_set (inv
->insn
);
1688 emit_insn_after (gen_move_insn (SET_DEST (set
), reg
), inv
->insn
);
1689 delete_insn (inv
->insn
);
1693 inv
->orig_regno
= regno
;
1698 /* If we failed, clear move flag, so that we do not try to move inv
1701 fprintf (dump_file
, "Failed to move invariant %d\n", invno
);
1703 inv
->reg
= NULL_RTX
;
1704 inv
->orig_regno
= -1;
1709 /* Move selected invariant out of the LOOP. Newly created regs are marked
1710 in TEMPORARY_REGS. */
1713 move_invariants (struct loop
*loop
)
1715 struct invariant
*inv
;
1718 FOR_EACH_VEC_ELT (invariants
, i
, inv
)
1719 move_invariant_reg (loop
, i
);
1720 if (flag_ira_loop_pressure
&& resize_reg_info ())
1722 FOR_EACH_VEC_ELT (invariants
, i
, inv
)
1723 if (inv
->reg
!= NULL_RTX
)
1725 if (inv
->orig_regno
>= 0)
1726 setup_reg_classes (REGNO (inv
->reg
),
1727 reg_preferred_class (inv
->orig_regno
),
1728 reg_alternate_class (inv
->orig_regno
),
1729 reg_allocno_class (inv
->orig_regno
));
1731 setup_reg_classes (REGNO (inv
->reg
),
1732 GENERAL_REGS
, NO_REGS
, GENERAL_REGS
);
1737 /* Initializes invariant motion data. */
1740 init_inv_motion_data (void)
1744 invariants
.create (100);
1747 /* Frees the data allocated by invariant motion. */
1750 free_inv_motion_data (void)
1754 struct invariant
*inv
;
1756 check_invariant_table_size ();
1757 for (i
= 0; i
< DF_DEFS_TABLE_SIZE (); i
++)
1759 inv
= invariant_table
[i
];
1763 gcc_assert (def
!= NULL
);
1765 free_use_list (def
->uses
);
1767 invariant_table
[i
] = NULL
;
1771 FOR_EACH_VEC_ELT (invariants
, i
, inv
)
1773 BITMAP_FREE (inv
->depends_on
);
1776 invariants
.release ();
1779 /* Move the invariants out of the LOOP. */
1782 move_single_loop_invariants (struct loop
*loop
)
1784 init_inv_motion_data ();
1786 find_invariants (loop
);
1787 find_invariants_to_move (optimize_loop_for_speed_p (loop
),
1788 LOOP_DATA (loop
)->has_call
);
1789 move_invariants (loop
);
1791 free_inv_motion_data ();
1794 /* Releases the auxiliary data for LOOP. */
1797 free_loop_data (struct loop
*loop
)
1799 struct loop_data
*data
= LOOP_DATA (loop
);
1803 bitmap_clear (&LOOP_DATA (loop
)->regs_ref
);
1804 bitmap_clear (&LOOP_DATA (loop
)->regs_live
);
/* Registers currently living.  */
static bitmap_head curr_regs_live;

/* Current reg pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Record all regs that are set in any one insn.  Communication from
   mark_reg_{store,clobber} and global_conflicts.  Asm can refer to
   all hard-registers.  */
static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
		     ? FIRST_PSEUDO_REGISTER : MAX_RECOG_OPERANDS) * 2];

/* Number of regs stored in the previous array.  */
static int n_regs_set;
1825 /* Return pressure class and number of needed hard registers (through
1826 *NREGS) of register REGNO. */
1827 static enum reg_class
1828 get_regno_pressure_class (int regno
, int *nregs
)
1830 if (regno
>= FIRST_PSEUDO_REGISTER
)
1832 enum reg_class pressure_class
;
1834 pressure_class
= reg_allocno_class (regno
);
1835 pressure_class
= ira_pressure_class_translate
[pressure_class
];
1837 = ira_reg_class_max_nregs
[pressure_class
][PSEUDO_REGNO_MODE (regno
)];
1838 return pressure_class
;
1840 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs
, regno
)
1841 && ! TEST_HARD_REG_BIT (eliminable_regset
, regno
))
1844 return ira_pressure_class_translate
[REGNO_REG_CLASS (regno
)];
1853 /* Increase (if INCR_P) or decrease current register pressure for
1856 change_pressure (int regno
, bool incr_p
)
1859 enum reg_class pressure_class
;
1861 pressure_class
= get_regno_pressure_class (regno
, &nregs
);
1863 curr_reg_pressure
[pressure_class
] -= nregs
;
1866 curr_reg_pressure
[pressure_class
] += nregs
;
1867 if (LOOP_DATA (curr_loop
)->max_reg_pressure
[pressure_class
]
1868 < curr_reg_pressure
[pressure_class
])
1869 LOOP_DATA (curr_loop
)->max_reg_pressure
[pressure_class
]
1870 = curr_reg_pressure
[pressure_class
];
1874 /* Mark REGNO birth. */
1876 mark_regno_live (int regno
)
1880 for (loop
= curr_loop
;
1881 loop
!= current_loops
->tree_root
;
1882 loop
= loop_outer (loop
))
1883 bitmap_set_bit (&LOOP_DATA (loop
)->regs_live
, regno
);
1884 if (!bitmap_set_bit (&curr_regs_live
, regno
))
1886 change_pressure (regno
, true);
1889 /* Mark REGNO death. */
1891 mark_regno_death (int regno
)
1893 if (! bitmap_clear_bit (&curr_regs_live
, regno
))
1895 change_pressure (regno
, false);
1898 /* Mark setting register REG. */
1900 mark_reg_store (rtx reg
, const_rtx setter ATTRIBUTE_UNUSED
,
1901 void *data ATTRIBUTE_UNUSED
)
1903 if (GET_CODE (reg
) == SUBREG
)
1904 reg
= SUBREG_REG (reg
);
1909 regs_set
[n_regs_set
++] = reg
;
1911 unsigned int end_regno
= END_REGNO (reg
);
1912 for (unsigned int regno
= REGNO (reg
); regno
< end_regno
; ++regno
)
1913 mark_regno_live (regno
);
1916 /* Mark clobbering register REG. */
1918 mark_reg_clobber (rtx reg
, const_rtx setter
, void *data
)
1920 if (GET_CODE (setter
) == CLOBBER
)
1921 mark_reg_store (reg
, setter
, data
);
1924 /* Mark register REG death. */
1926 mark_reg_death (rtx reg
)
1928 unsigned int end_regno
= END_REGNO (reg
);
1929 for (unsigned int regno
= REGNO (reg
); regno
< end_regno
; ++regno
)
1930 mark_regno_death (regno
);
1933 /* Mark occurrence of registers in X for the current loop. */
1935 mark_ref_regs (rtx x
)
1944 code
= GET_CODE (x
);
1949 for (loop
= curr_loop
;
1950 loop
!= current_loops
->tree_root
;
1951 loop
= loop_outer (loop
))
1952 bitmap_set_bit (&LOOP_DATA (loop
)->regs_ref
, REGNO (x
));
1956 fmt
= GET_RTX_FORMAT (code
);
1957 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1959 mark_ref_regs (XEXP (x
, i
));
1960 else if (fmt
[i
] == 'E')
1964 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1965 mark_ref_regs (XVECEXP (x
, i
, j
));
1969 /* Calculate register pressure in the loops. */
1971 calculate_loop_reg_pressure (void)
1979 struct loop
*loop
, *parent
;
1981 FOR_EACH_LOOP (loop
, 0)
1982 if (loop
->aux
== NULL
)
1984 loop
->aux
= xcalloc (1, sizeof (struct loop_data
));
1985 bitmap_initialize (&LOOP_DATA (loop
)->regs_ref
, ®_obstack
);
1986 bitmap_initialize (&LOOP_DATA (loop
)->regs_live
, ®_obstack
);
1988 ira_setup_eliminable_regset ();
1989 bitmap_initialize (&curr_regs_live
, ®_obstack
);
1990 FOR_EACH_BB_FN (bb
, cfun
)
1992 curr_loop
= bb
->loop_father
;
1993 if (curr_loop
== current_loops
->tree_root
)
1996 for (loop
= curr_loop
;
1997 loop
!= current_loops
->tree_root
;
1998 loop
= loop_outer (loop
))
1999 bitmap_ior_into (&LOOP_DATA (loop
)->regs_live
, DF_LR_IN (bb
));
2001 bitmap_copy (&curr_regs_live
, DF_LR_IN (bb
));
2002 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2003 curr_reg_pressure
[ira_pressure_classes
[i
]] = 0;
2004 EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live
, 0, j
, bi
)
2005 change_pressure (j
, true);
2007 FOR_BB_INSNS (bb
, insn
)
2009 if (! NONDEBUG_INSN_P (insn
))
2012 mark_ref_regs (PATTERN (insn
));
2014 note_stores (PATTERN (insn
), mark_reg_clobber
, NULL
);
2016 /* Mark any registers dead after INSN as dead now. */
2018 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
2019 if (REG_NOTE_KIND (link
) == REG_DEAD
)
2020 mark_reg_death (XEXP (link
, 0));
2022 /* Mark any registers set in INSN as live,
2023 and mark them as conflicting with all other live regs.
2024 Clobbers are processed again, so they conflict with
2025 the registers that are set. */
2027 note_stores (PATTERN (insn
), mark_reg_store
, NULL
);
2030 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
2031 if (REG_NOTE_KIND (link
) == REG_INC
)
2032 mark_reg_store (XEXP (link
, 0), NULL_RTX
, NULL
);
2034 while (n_regs_set
-- > 0)
2036 rtx note
= find_regno_note (insn
, REG_UNUSED
,
2037 REGNO (regs_set
[n_regs_set
]));
2041 mark_reg_death (XEXP (note
, 0));
2045 bitmap_clear (&curr_regs_live
);
2046 if (flag_ira_region
== IRA_REGION_MIXED
2047 || flag_ira_region
== IRA_REGION_ALL
)
2048 FOR_EACH_LOOP (loop
, 0)
2050 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop
)->regs_live
, 0, j
, bi
)
2051 if (! bitmap_bit_p (&LOOP_DATA (loop
)->regs_ref
, j
))
2053 enum reg_class pressure_class
;
2056 pressure_class
= get_regno_pressure_class (j
, &nregs
);
2057 LOOP_DATA (loop
)->max_reg_pressure
[pressure_class
] -= nregs
;
2060 if (dump_file
== NULL
)
2062 FOR_EACH_LOOP (loop
, 0)
2064 parent
= loop_outer (loop
);
2065 fprintf (dump_file
, "\n Loop %d (parent %d, header bb%d, depth %d)\n",
2066 loop
->num
, (parent
== NULL
? -1 : parent
->num
),
2067 loop
->header
->index
, loop_depth (loop
));
2068 fprintf (dump_file
, "\n ref. regnos:");
2069 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop
)->regs_ref
, 0, j
, bi
)
2070 fprintf (dump_file
, " %d", j
);
2071 fprintf (dump_file
, "\n live regnos:");
2072 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop
)->regs_live
, 0, j
, bi
)
2073 fprintf (dump_file
, " %d", j
);
2074 fprintf (dump_file
, "\n Pressure:");
2075 for (i
= 0; (int) i
< ira_pressure_classes_num
; i
++)
2077 enum reg_class pressure_class
;
2079 pressure_class
= ira_pressure_classes
[i
];
2080 if (LOOP_DATA (loop
)->max_reg_pressure
[pressure_class
] == 0)
2082 fprintf (dump_file
, " %s=%d", reg_class_names
[pressure_class
],
2083 LOOP_DATA (loop
)->max_reg_pressure
[pressure_class
]);
2085 fprintf (dump_file
, "\n");
2091 /* Move the invariants out of the loops. */
2094 move_loop_invariants (void)
2098 if (flag_ira_loop_pressure
)
2101 regstat_init_n_sets_and_refs ();
2102 ira_set_pseudo_classes (true, dump_file
);
2103 calculate_loop_reg_pressure ();
2104 regstat_free_n_sets_and_refs ();
2106 df_set_flags (DF_EQ_NOTES
+ DF_DEFER_INSN_RESCAN
);
2107 /* Process the loops, innermost first. */
2108 FOR_EACH_LOOP (loop
, LI_FROM_INNERMOST
)
2111 /* move_single_loop_invariants for very large loops
2112 is time consuming and might need a lot of memory. */
2113 if (loop
->num_nodes
<= (unsigned) LOOP_INVARIANT_MAX_BBS_IN_LOOP
)
2114 move_single_loop_invariants (loop
);
2117 FOR_EACH_LOOP (loop
, 0)
2119 free_loop_data (loop
);
2122 if (flag_ira_loop_pressure
)
2123 /* There is no sense to keep this info because it was most
2124 probably outdated by subsequent passes. */
2126 free (invariant_table
);
2127 invariant_table
= NULL
;
2128 invariant_table_size
= 0;
2130 checking_verify_flow_info ();