PR c++/54198
[official-gcc.git] / gcc / loop-invariant.c
blob4cc1ea6f0ccd8e9228e6943d727011b1f0a6df98
1 /* RTL-level loop invariant motion.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This implements the loop invariant motion pass. It is very simple
22 (no calls, no loads/stores, etc.). This should be sufficient to cleanup
23 things like address arithmetics -- other more complicated invariants should
24 be eliminated on GIMPLE either in tree-ssa-loop-im.c or in tree-ssa-pre.c.
26 We proceed loop by loop -- it is simpler than trying to handle things
27 globally and should not lose much. First we inspect all sets inside loop
28 and create a dependency graph on insns (saying "to move this insn, you must
29 also move the following insns").
31 We then need to determine what to move. We estimate the number of registers
32 used and move as many invariants as possible while we still have enough free
33 registers. We prefer the expensive invariants.
35 Then we move the selected invariants out of the loop, creating a new
36 temporaries for them if necessary. */
38 #include "config.h"
39 #include "system.h"
40 #include "coretypes.h"
41 #include "tm.h"
42 #include "hard-reg-set.h"
43 #include "rtl.h"
44 #include "tm_p.h"
45 #include "obstack.h"
46 #include "basic-block.h"
47 #include "cfgloop.h"
48 #include "expr.h"
49 #include "recog.h"
50 #include "function.h"
51 #include "flags.h"
52 #include "df.h"
53 #include "hashtab.h"
54 #include "except.h"
55 #include "params.h"
56 #include "regs.h"
57 #include "ira.h"
58 #include "dumpfile.h"
60 /* The data stored for the loop. */
62 struct loop_data
64 struct loop *outermost_exit; /* The outermost exit of the loop. */
65 bool has_call; /* True if the loop contains a call. */
66 /* Maximal register pressure inside loop for given register class
67 (defined only for the pressure classes). */
68 int max_reg_pressure[N_REG_CLASSES];
69 /* Loop regs referenced and live pseudo-registers. */
70 bitmap_head regs_ref;
71 bitmap_head regs_live;
/* Accessor for the per-loop data; the struct loop_data instance is
   stashed in the loop's generic aux pointer (allocated in find_exits). */
74 #define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)
76 /* The description of an use. */
/* Each use is a node of a singly-linked list headed at struct def::uses. */
78 struct use
80 rtx *pos; /* Position of the use. */
81 rtx insn; /* The insn in that the use occurs. */
82 unsigned addr_use_p; /* Whether the use occurs in an address. */
83 struct use *next; /* Next use in the list. */
86 /* The description of a def. */
/* Describes an invariant definition together with the uses that are
   reached only by this def (filled in by record_uses / record_use). */
88 struct def
90 struct use *uses; /* The list of uses that are uniquely reached
91 by it. */
92 unsigned n_uses; /* Number of such uses. */
93 unsigned n_addr_uses; /* Number of uses in addresses. */
94 unsigned invno; /* The corresponding invariant. */
97 /* The data stored for each invariant. */
99 struct invariant
101 /* The number of the invariant. */
102 unsigned invno;
104 /* The number of the invariant with the same value. */
/* Set to ~0u until find_identical_invariants computes the equivalence
   class representative; asserted against ~0u before use. */
105 unsigned eqto;
107 /* If we moved the invariant out of the loop, the register that contains its
108 value. */
109 rtx reg;
111 /* If we moved the invariant out of the loop, the original regno
112 that contained its value. */
113 int orig_regno;
115 /* The definition of the invariant. */
116 struct def *def;
118 /* The insn in that it is defined. */
119 rtx insn;
121 /* Whether it is always executed. */
122 bool always_executed;
124 /* Whether to move the invariant. */
125 bool move;
127 /* Whether the invariant is cheap when used as an address. */
128 bool cheap_address;
130 /* Cost of the invariant. */
131 unsigned cost;
133 /* The invariants it depends on. */
/* Bitmap of invnos; an invariant can only be moved if everything it
   depends on is moved as well. */
134 bitmap depends_on;
136 /* Used for detecting already visited invariants during determining
137 costs of movements. */
138 unsigned stamp;
141 /* Currently processed loop. */
142 static struct loop *curr_loop;
144 /* Table of invariants indexed by the df_ref uid field. */
/* Grown lazily by check_invariant_table_size; NULL entries mean
   "no invariant recorded for this def". */
146 static unsigned int invariant_table_size = 0;
147 static struct invariant ** invariant_table;
149 /* Entry for hash table of invariant expressions. */
/* Used by merge_identical_invariants to detect invariants computing the
   same value; hashed by hash_invariant_expr, compared by eq_invariant_expr. */
151 struct invariant_expr_entry
153 /* The invariant. */
154 struct invariant *inv;
156 /* Its value. */
157 rtx expr;
159 /* Its mode. */
160 enum machine_mode mode;
162 /* Its hash. */
163 hashval_t hash;
166 /* The actual stamp for marking already visited invariants during determining
167 costs of movements. */
/* Bumped once per gain_for_invariant query; inv->stamp == actual_stamp
   means "already counted in this query". */
169 static unsigned actual_stamp;
171 typedef struct invariant *invariant_p;
173 DEF_VEC_P(invariant_p);
174 DEF_VEC_ALLOC_P(invariant_p, heap);
176 /* The invariants. */
/* Vector of all invariants found in the current loop, indexed by invno. */
178 static VEC(invariant_p,heap) *invariants;
180 /* Check the size of the invariant table and realloc if necessary. */
182 static void
183 check_invariant_table_size (void)
185 if (invariant_table_size < DF_DEFS_TABLE_SIZE())
187 unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
188 invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
189 memset (&invariant_table[invariant_table_size], 0,
190 (new_size - invariant_table_size) * sizeof (struct rtx_iv *));
191 invariant_table_size = new_size;
195 /* Test for possibility of invariantness of X. */
/* Returns true if X could be loop invariant: a constant, a register, a
   read-only non-volatile MEM, or any expression built recursively from
   such operands.  Volatile asm, CALLs, PC/CC0 and volatile unspecs are
   rejected outright. */
197 static bool
198 check_maybe_invariant (rtx x)
200 enum rtx_code code = GET_CODE (x);
201 int i, j;
202 const char *fmt;
204 switch (code)
206 CASE_CONST_ANY:
207 case SYMBOL_REF:
208 case CONST:
209 case LABEL_REF:
210 return true;
212 case PC:
213 case CC0:
214 case UNSPEC_VOLATILE:
215 case CALL:
216 return false;
218 case REG:
219 return true;
221 case MEM:
222 /* Load/store motion is done elsewhere. ??? Perhaps also add it here?
223 It should not be hard, and might be faster than "elsewhere". */
225 /* Just handle the most trivial case where we load from an unchanging
226 location (most importantly, pic tables). */
227 if (MEM_READONLY_P (x) && !MEM_VOLATILE_P (x))
228 break;
230 return false;
232 case ASM_OPERANDS:
233 /* Don't mess with insns declared volatile. */
234 if (MEM_VOLATILE_P (x))
235 return false;
236 break;
238 default:
239 break;
/* Recurse over the operands using the generic RTX format string:
   'e' is a subexpression, 'E' a vector of subexpressions. */
242 fmt = GET_RTX_FORMAT (code);
243 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
245 if (fmt[i] == 'e')
247 if (!check_maybe_invariant (XEXP (x, i)))
248 return false;
250 else if (fmt[i] == 'E')
252 for (j = 0; j < XVECLEN (x, i); j++)
253 if (!check_maybe_invariant (XVECEXP (x, i, j)))
254 return false;
258 return true;
261 /* Returns the invariant definition for USE, or NULL if USE is not
262 invariant. */
/* USE is invariant only if it is reached by exactly one def, that def
   was recorded as an invariant, and the def's block dominates the use's
   block. */
264 static struct invariant *
265 invariant_for_use (df_ref use)
267 struct df_link *defs;
268 df_ref def;
269 basic_block bb = DF_REF_BB (use), def_bb;
/* A read-modify-write use depends on its own value; never invariant. */
271 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
272 return NULL;
274 defs = DF_REF_CHAIN (use);
275 if (!defs || defs->next)
276 return NULL;
277 def = defs->ref;
278 check_invariant_table_size ();
279 if (!invariant_table[DF_REF_ID(def)])
280 return NULL;
282 def_bb = DF_REF_BB (def);
283 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
284 return NULL;
285 return invariant_table[DF_REF_ID(def)];
288 /* Computes hash value for invariant expression X in INSN. */
/* Registers defined by an invariant hash as the invariant's equivalence
   class number (eqto), so two expressions over equivalent invariants
   hash the same; everything else hashes structurally via hash_rtx and
   XOR-folding of operands. */
290 static hashval_t
291 hash_invariant_expr_1 (rtx insn, rtx x)
293 enum rtx_code code = GET_CODE (x);
294 int i, j;
295 const char *fmt;
296 hashval_t val = code;
297 int do_not_record_p;
298 df_ref use;
299 struct invariant *inv;
301 switch (code)
303 CASE_CONST_ANY:
304 case SYMBOL_REF:
305 case CONST:
306 case LABEL_REF:
307 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
309 case REG:
310 use = df_find_use (insn, x);
311 if (!use)
312 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
313 inv = invariant_for_use (use);
314 if (!inv)
315 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
/* eqto must have been computed already (deps are processed first in
   find_identical_invariants). */
317 gcc_assert (inv->eqto != ~0u);
318 return inv->eqto;
320 default:
321 break;
324 fmt = GET_RTX_FORMAT (code);
325 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
327 if (fmt[i] == 'e')
328 val ^= hash_invariant_expr_1 (insn, XEXP (x, i));
329 else if (fmt[i] == 'E')
331 for (j = 0; j < XVECLEN (x, i); j++)
332 val ^= hash_invariant_expr_1 (insn, XVECEXP (x, i, j));
334 else if (fmt[i] == 'i' || fmt[i] == 'n')
335 val ^= XINT (x, i);
338 return val;
341 /* Returns true if the invariant expressions E1 and E2 used in insns INSN1
342 and INSN2 have always the same value. */
/* Structural equality, except registers compare equal when they are
   defined by invariants of the same equivalence class (eqto).  Must be
   kept in sync with hash_invariant_expr_1. */
344 static bool
345 invariant_expr_equal_p (rtx insn1, rtx e1, rtx insn2, rtx e2)
347 enum rtx_code code = GET_CODE (e1);
348 int i, j;
349 const char *fmt;
350 df_ref use1, use2;
351 struct invariant *inv1 = NULL, *inv2 = NULL;
352 rtx sub1, sub2;
354 /* If mode of only one of the operands is VOIDmode, it is not equivalent to
355 the other one. If both are VOIDmode, we rely on the caller of this
356 function to verify that their modes are the same. */
357 if (code != GET_CODE (e2) || GET_MODE (e1) != GET_MODE (e2))
358 return false;
360 switch (code)
362 CASE_CONST_ANY:
363 case SYMBOL_REF:
364 case CONST:
365 case LABEL_REF:
366 return rtx_equal_p (e1, e2);
368 case REG:
369 use1 = df_find_use (insn1, e1);
370 use2 = df_find_use (insn2, e2);
371 if (use1)
372 inv1 = invariant_for_use (use1);
373 if (use2)
374 inv2 = invariant_for_use (use2);
/* Neither reg is invariant-defined: plain structural comparison. */
376 if (!inv1 && !inv2)
377 return rtx_equal_p (e1, e2);
379 if (!inv1 || !inv2)
380 return false;
382 gcc_assert (inv1->eqto != ~0u);
383 gcc_assert (inv2->eqto != ~0u);
384 return inv1->eqto == inv2->eqto;
386 default:
387 break;
390 fmt = GET_RTX_FORMAT (code);
391 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
393 if (fmt[i] == 'e')
395 sub1 = XEXP (e1, i);
396 sub2 = XEXP (e2, i);
398 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
399 return false;
402 else if (fmt[i] == 'E')
404 if (XVECLEN (e1, i) != XVECLEN (e2, i))
405 return false;
407 for (j = 0; j < XVECLEN (e1, i); j++)
409 sub1 = XVECEXP (e1, i, j);
410 sub2 = XVECEXP (e2, i, j);
412 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
413 return false;
416 else if (fmt[i] == 'i' || fmt[i] == 'n')
418 if (XINT (e1, i) != XINT (e2, i))
419 return false;
421 /* Unhandled type of subexpression, we fail conservatively. */
422 else
423 return false;
426 return true;
429 /* Returns hash value for invariant expression entry E. */
/* htab hash callback; the hash was precomputed in find_or_insert_inv. */
431 static hashval_t
432 hash_invariant_expr (const void *e)
434 const struct invariant_expr_entry *const entry =
435 (const struct invariant_expr_entry *) e;
437 return entry->hash;
440 /* Compares invariant expression entries E1 and E2. */
/* htab equality callback: entries are equal when their modes match and
   the expressions always compute the same value. */
442 static int
443 eq_invariant_expr (const void *e1, const void *e2)
445 const struct invariant_expr_entry *const entry1 =
446 (const struct invariant_expr_entry *) e1;
447 const struct invariant_expr_entry *const entry2 =
448 (const struct invariant_expr_entry *) e2;
450 if (entry1->mode != entry2->mode)
451 return 0;
453 return invariant_expr_equal_p (entry1->inv->insn, entry1->expr,
454 entry2->inv->insn, entry2->expr);
457 /* Checks whether invariant with value EXPR in machine mode MODE is
458 recorded in EQ. If this is the case, return the invariant. Otherwise
459 insert INV to the table for this expression and return INV. */
461 static struct invariant *
462 find_or_insert_inv (htab_t eq, rtx expr, enum machine_mode mode,
463 struct invariant *inv)
465 hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
466 struct invariant_expr_entry *entry;
467 struct invariant_expr_entry pentry;
468 PTR *slot;
/* Probe with a stack-allocated key; only allocate an entry on miss. */
470 pentry.expr = expr;
471 pentry.inv = inv;
472 pentry.mode = mode;
473 slot = htab_find_slot_with_hash (eq, &pentry, hash, INSERT);
474 entry = (struct invariant_expr_entry *) *slot;
476 if (entry)
477 return entry->inv;
/* First invariant with this value: it becomes the class representative.
   The entry is freed by the htab's del function (free) on htab_delete. */
479 entry = XNEW (struct invariant_expr_entry);
480 entry->inv = inv;
481 entry->expr = expr;
482 entry->mode = mode;
483 entry->hash = hash;
484 *slot = entry;
486 return inv;
489 /* Finds invariants identical to INV and records the equivalence. EQ is the
490 hash table of the invariants. */
/* Sets inv->eqto to the invno of the representative of INV's value
   class.  Dependencies are resolved first (recursively) because the
   hash/equality of INV's expression relies on their eqto values. */
492 static void
493 find_identical_invariants (htab_t eq, struct invariant *inv)
495 unsigned depno;
496 bitmap_iterator bi;
497 struct invariant *dep;
498 rtx expr, set;
499 enum machine_mode mode;
/* Already processed. */
501 if (inv->eqto != ~0u)
502 return;
504 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
506 dep = VEC_index (invariant_p, invariants, depno);
507 find_identical_invariants (eq, dep);
510 set = single_set (inv->insn);
511 expr = SET_SRC (set);
512 mode = GET_MODE (expr);
/* For VOIDmode sources (e.g. constants) take the mode of the destination. */
513 if (mode == VOIDmode)
514 mode = GET_MODE (SET_DEST (set));
515 inv->eqto = find_or_insert_inv (eq, expr, mode, inv)->invno;
517 if (dump_file && inv->eqto != inv->invno)
518 fprintf (dump_file,
519 "Invariant %d is equivalent to invariant %d.\n",
520 inv->invno, inv->eqto);
523 /* Find invariants with the same value and record the equivalences. */
/* Builds a temporary hash table of invariant values and assigns every
   invariant its equivalence-class representative (inv->eqto). */
525 static void
526 merge_identical_invariants (void)
528 unsigned i;
529 struct invariant *inv;
530 htab_t eq = htab_create (VEC_length (invariant_p, invariants),
531 hash_invariant_expr, eq_invariant_expr, free);
533 FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
534 find_identical_invariants (eq, inv);
536 htab_delete (eq);
539 /* Determines the basic blocks inside LOOP that are always executed and
540 stores their bitmap to ALWAYS_REACHED. MAY_EXIT is a bitmap of
541 basic blocks that may either exit the loop, or contain the call that
542 does not have to return. BODY is body of the loop obtained by
543 get_loop_body_in_dom_order. */
/* Walks BODY in dominance order, marking blocks that dominate the latch
   until the first block that may leave the loop; everything after that
   point is not guaranteed to execute on each iteration. */
545 static void
546 compute_always_reached (struct loop *loop, basic_block *body,
547 bitmap may_exit, bitmap always_reached)
549 unsigned i;
551 for (i = 0; i < loop->num_nodes; i++)
553 if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i]))
554 bitmap_set_bit (always_reached, i);
556 if (bitmap_bit_p (may_exit, i))
557 return;
561 /* Finds exits out of the LOOP with body BODY. Marks blocks in that we may
562 exit the loop by cfg edge to HAS_EXIT and MAY_EXIT. In MAY_EXIT
563 additionally mark blocks that may exit due to a call. */
/* Also allocates and fills LOOP's loop_data (outermost_exit, has_call).
   Subloops are summarized through their own loop_data rather than
   rescanned insn by insn. */
565 static void
566 find_exits (struct loop *loop, basic_block *body,
567 bitmap may_exit, bitmap has_exit)
569 unsigned i;
570 edge_iterator ei;
571 edge e;
572 struct loop *outermost_exit = loop, *aexit;
573 bool has_call = false;
574 rtx insn;
576 for (i = 0; i < loop->num_nodes; i++)
578 if (body[i]->loop_father == loop)
580 FOR_BB_INSNS (body[i], insn)
/* A call that is not const/pure (or is a looping const/pure call) may
   never return, so the block counts as a potential exit. */
582 if (CALL_P (insn)
583 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
584 || !RTL_CONST_OR_PURE_CALL_P (insn)))
586 has_call = true;
587 bitmap_set_bit (may_exit, i);
588 break;
592 FOR_EACH_EDGE (e, ei, body[i]->succs)
594 if (flow_bb_inside_loop_p (loop, e->dest))
595 continue;
597 bitmap_set_bit (may_exit, i);
598 bitmap_set_bit (has_exit, i);
599 outermost_exit = find_common_loop (outermost_exit,
600 e->dest->loop_father)
602 continue;
605 /* Use the data stored for the subloop to decide whether we may exit
606 through it. It is sufficient to do this for header of the loop,
607 as other basic blocks inside it must be dominated by it. */
608 if (body[i]->loop_father->header != body[i])
609 continue;
611 if (LOOP_DATA (body[i]->loop_father)->has_call)
613 has_call = true;
614 bitmap_set_bit (may_exit, i);
616 aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit;
617 if (aexit != loop)
619 bitmap_set_bit (may_exit, i);
620 bitmap_set_bit (has_exit, i);
622 if (flow_loop_nested_p (aexit, outermost_exit))
623 outermost_exit = aexit;
/* First visit of this loop: allocate its per-loop data on the aux
   pointer and initialize the register bitmaps. */
627 if (loop->aux == NULL)
629 loop->aux = xcalloc (1, sizeof (struct loop_data));
630 bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
631 bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
633 LOOP_DATA (loop)->outermost_exit = outermost_exit;
634 LOOP_DATA (loop)->has_call = has_call;
637 /* Check whether we may assign a value to X from a register. */
/* Rejects VOIDmode/BLKmode destinations, modes we cannot copy, and
   hard registers in NO_REGS (e.g. fixed special-purpose registers). */
639 static bool
640 may_assign_reg_p (rtx x)
642 return (GET_MODE (x) != VOIDmode
643 && GET_MODE (x) != BLKmode
644 && can_copy_p (GET_MODE (x))
645 && (!REG_P (x)
646 || !HARD_REGISTER_P (x)
647 || REGNO_REG_CLASS (REGNO (x)) != NO_REGS));
650 /* Finds definitions that may correspond to invariants in LOOP with body
651 BODY. */
/* (Re)computes use-def chains restricted to the loop's blocks; the
   chains are consumed by check_dependency / invariant_for_use. */
653 static void
654 find_defs (struct loop *loop, basic_block *body)
656 unsigned i;
657 bitmap blocks = BITMAP_ALLOC (NULL);
659 for (i = 0; i < loop->num_nodes; i++)
660 bitmap_set_bit (blocks, body[i]->index);
/* Drop stale chains from the previous loop before rebuilding. */
662 df_remove_problem (df_chain);
663 df_process_deferred_rescans ();
664 df_chain_add_problem (DF_UD_CHAIN);
665 df_set_blocks (blocks);
666 df_analyze ();
668 if (dump_file)
670 df_dump_region (dump_file);
671 fprintf (dump_file, "*****starting processing of loop ******\n");
672 print_rtl_with_bb (dump_file, get_insns (), dump_flags);
673 fprintf (dump_file, "*****ending processing of loop ******\n");
675 check_invariant_table_size ();
677 BITMAP_FREE (blocks);
680 /* Creates a new invariant for definition DEF in INSN, depending on invariants
681 in DEPENDS_ON. ALWAYS_EXECUTED is true if the insn is always executed,
682 unless the program ends due to a function call. The newly created invariant
683 is returned. */
/* DEF may be NULL for a non-simple set (destination not a pseudo); the
   invariant is appended to the global `invariants' vector and its invno
   is the vector index. */
685 static struct invariant *
686 create_new_invariant (struct def *def, rtx insn, bitmap depends_on,
687 bool always_executed)
689 struct invariant *inv = XNEW (struct invariant);
690 rtx set = single_set (insn);
691 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
693 inv->def = def;
694 inv->always_executed = always_executed;
695 inv->depends_on = depends_on;
697 /* If the set is simple, usually by moving it we move the whole store out of
698 the loop. Otherwise we save only cost of the computation. */
699 if (def)
701 inv->cost = set_rtx_cost (set, speed);
702 /* ??? Try to determine cheapness of address computation. Unfortunately
703 the address cost is only a relative measure, we can't really compare
704 it with any absolute number, but only with other address costs.
705 But here we don't have any other addresses, so compare with a magic
706 number anyway. It has to be large enough to not regress PR33928
707 (by avoiding to move reg+8,reg+16,reg+24 invariants), but small
708 enough to not regress 410.bwaves either (by still moving reg+reg
709 invariants).
710 See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html . */
711 inv->cheap_address = address_cost (SET_SRC (set), word_mode,
712 ADDR_SPACE_GENERIC, speed) < 3;
714 else
716 inv->cost = set_src_cost (SET_SRC (set), speed);
717 inv->cheap_address = false;
/* Fields filled in by later phases (selection and motion). */
720 inv->move = false;
721 inv->reg = NULL_RTX;
722 inv->orig_regno = -1;
723 inv->stamp = 0;
724 inv->insn = insn;
726 inv->invno = VEC_length (invariant_p, invariants);
/* ~0u = equivalence class not computed yet; see merge_identical_invariants. */
727 inv->eqto = ~0u;
728 if (def)
729 def->invno = inv->invno;
730 VEC_safe_push (invariant_p, heap, invariants, inv);
732 if (dump_file)
734 fprintf (dump_file,
735 "Set in insn %d is invariant (%d), cost %d, depends on ",
736 INSN_UID (insn), inv->invno, inv->cost);
737 dump_bitmap (dump_file, inv->depends_on);
740 return inv;
743 /* Record USE at DEF. */
/* Prepends a new struct use to DEF's use list and updates the use
   counters (total and address-context uses). */
745 static void
746 record_use (struct def *def, df_ref use)
748 struct use *u = XNEW (struct use);
750 u->pos = DF_REF_REAL_LOC (use);
751 u->insn = DF_REF_INSN (use);
/* Address uses matter for the cheap_address heuristic in get_inv_cost. */
752 u->addr_use_p = (DF_REF_TYPE (use) == DF_REF_REG_MEM_LOAD
753 || DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE);
754 u->next = def->uses;
755 def->uses = u;
756 def->n_uses++;
757 if (u->addr_use_p)
758 def->n_addr_uses++;
761 /* Finds the invariants USE depends on and store them to the DEPENDS_ON
762 bitmap. Returns true if all dependencies of USE are known to be
763 loop invariants, false otherwise. */
/* A use with no reaching def inside the region is invariant by itself
   (defined outside the loop); a use with multiple reaching defs is
   conservatively rejected. */
765 static bool
766 check_dependency (basic_block bb, df_ref use, bitmap depends_on)
768 df_ref def;
769 basic_block def_bb;
770 struct df_link *defs;
771 struct def *def_data;
772 struct invariant *inv;
774 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
775 return false;
777 defs = DF_REF_CHAIN (use);
778 if (!defs)
779 return true;
781 if (defs->next)
782 return false;
784 def = defs->ref;
785 check_invariant_table_size ();
786 inv = invariant_table[DF_REF_ID(def)];
787 if (!inv)
788 return false;
790 def_data = inv->def;
791 gcc_assert (def_data != NULL);
793 def_bb = DF_REF_BB (def);
794 /* Note that in case bb == def_bb, we know that the definition
795 dominates insn, because def has invariant_table[DF_REF_ID(def)]
796 defined and we process the insns in the basic block bb
797 sequentially. */
798 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
799 return false;
801 bitmap_set_bit (depends_on, def_data->invno);
802 return true;
806 /* Finds the invariants INSN depends on and store them to the DEPENDS_ON
807 bitmap. Returns true if all dependencies of INSN are known to be
808 loop invariants, false otherwise. */
/* Checks both the real uses and the uses appearing in REG_EQUAL/EQUIV
   notes (EQ_USES). */
810 static bool
811 check_dependencies (rtx insn, bitmap depends_on)
813 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
814 df_ref *use_rec;
815 basic_block bb = BLOCK_FOR_INSN (insn);
817 for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
818 if (!check_dependency (bb, *use_rec, depends_on))
819 return false;
820 for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
821 if (!check_dependency (bb, *use_rec, depends_on))
822 return false;
824 return true;
827 /* Finds invariant in INSN. ALWAYS_REACHED is true if the insn is always
828 executed. ALWAYS_EXECUTED is true if the insn is always executed,
829 unless the program ends due to a function call. */
/* If INSN is a single set of an invariant value, records it in the
   global invariants vector; simple sets (pseudo-reg destination) also
   get a struct def and an invariant_table entry so later uses can be
   chained to them. */
831 static void
832 find_invariant_insn (rtx insn, bool always_reached, bool always_executed)
834 df_ref ref;
835 struct def *def;
836 bitmap depends_on;
837 rtx set, dest;
838 bool simple = true;
839 struct invariant *inv;
841 #ifdef HAVE_cc0
842 /* We can't move a CC0 setter without the user. */
843 if (sets_cc0_p (insn))
844 return;
845 #endif
847 set = single_set (insn);
848 if (!set)
849 return;
850 dest = SET_DEST (set);
852 if (!REG_P (dest)
853 || HARD_REGISTER_P (dest))
854 simple = false;
856 if (!may_assign_reg_p (SET_DEST (set))
857 || !check_maybe_invariant (SET_SRC (set)))
858 return;
860 /* If the insn can throw exception, we cannot move it at all without changing
861 cfg. */
862 if (can_throw_internal (insn))
863 return;
865 /* We cannot make trapping insn executed, unless it was executed before. */
866 if (may_trap_or_fault_p (PATTERN (insn)) && !always_reached)
867 return;
869 depends_on = BITMAP_ALLOC (NULL);
870 if (!check_dependencies (insn, depends_on))
872 BITMAP_FREE (depends_on);
873 return;
876 if (simple)
877 def = XCNEW (struct def);
878 else
879 def = NULL;
881 inv = create_new_invariant (def, insn, depends_on, always_executed);
883 if (simple)
885 ref = df_find_def (insn, dest);
886 check_invariant_table_size ();
887 invariant_table[DF_REF_ID(ref)] = inv;
891 /* Record registers used in INSN that have a unique invariant definition. */
/* Walks both real and note (EQ) uses; each use reached by exactly one
   invariant def is attached to that def's use list. */
893 static void
894 record_uses (rtx insn)
896 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
897 df_ref *use_rec;
898 struct invariant *inv;
900 for (use_rec = DF_INSN_INFO_USES (insn_info); *use_rec; use_rec++)
902 df_ref use = *use_rec;
903 inv = invariant_for_use (use);
904 if (inv)
905 record_use (inv->def, use);
907 for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
909 df_ref use = *use_rec;
910 inv = invariant_for_use (use);
911 if (inv)
912 record_use (inv->def, use);
916 /* Finds invariants in INSN. ALWAYS_REACHED is true if the insn is always
917 executed. ALWAYS_EXECUTED is true if the insn is always executed,
918 unless the program ends due to a function call. */
/* Thin wrapper: first try to record INSN itself as an invariant, then
   record its uses of previously found invariants. */
920 static void
921 find_invariants_insn (rtx insn, bool always_reached, bool always_executed)
923 find_invariant_insn (insn, always_reached, always_executed);
924 record_uses (insn);
927 /* Finds invariants in basic block BB. ALWAYS_REACHED is true if the
928 basic block is always executed. ALWAYS_EXECUTED is true if the basic
929 block is always executed, unless the program ends due to a function
930 call. */
932 static void
933 find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
935 rtx insn;
937 FOR_BB_INSNS (bb, insn)
939 if (!NONDEBUG_INSN_P (insn))
940 continue;
942 find_invariants_insn (insn, always_reached, always_executed);
/* After a possibly non-returning call, later insns in this block are no
   longer guaranteed to be reached on every iteration. */
944 if (always_reached
945 && CALL_P (insn)
946 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
947 || ! RTL_CONST_OR_PURE_CALL_P (insn)))
948 always_reached = false;
952 /* Finds invariants in LOOP with body BODY. ALWAYS_REACHED is the bitmap of
953 basic blocks in BODY that are always executed. ALWAYS_EXECUTED is the
954 bitmap of basic blocks in BODY that are always executed unless the program
955 ends due to a function call. */
/* BODY is in dominance order (from get_loop_body_in_dom_order), so defs
   are seen before their uses within the loop. */
957 static void
958 find_invariants_body (struct loop *loop, basic_block *body,
959 bitmap always_reached, bitmap always_executed)
961 unsigned i;
963 for (i = 0; i < loop->num_nodes; i++)
964 find_invariants_bb (body[i],
965 bitmap_bit_p (always_reached, i),
966 bitmap_bit_p (always_executed, i));
969 /* Finds invariants in LOOP. */
/* Driver: computes exit/always-executed information, restricts df to
   the loop, scans the body for invariants and merges identical ones. */
971 static void
972 find_invariants (struct loop *loop)
974 bitmap may_exit = BITMAP_ALLOC (NULL);
975 bitmap always_reached = BITMAP_ALLOC (NULL);
976 bitmap has_exit = BITMAP_ALLOC (NULL);
977 bitmap always_executed = BITMAP_ALLOC (NULL);
978 basic_block *body = get_loop_body_in_dom_order (loop);
980 find_exits (loop, body, may_exit, has_exit);
981 compute_always_reached (loop, body, may_exit, always_reached);
982 compute_always_reached (loop, body, has_exit, always_executed);
984 find_defs (loop, body);
985 find_invariants_body (loop, body, always_reached, always_executed);
986 merge_identical_invariants ();
988 BITMAP_FREE (always_reached);
989 BITMAP_FREE (always_executed);
990 BITMAP_FREE (may_exit);
991 BITMAP_FREE (has_exit);
992 free (body);
995 /* Frees a list of uses USE. */
/* Walks the singly-linked list, saving the next pointer before freeing
   each node. */
997 static void
998 free_use_list (struct use *use)
1000 struct use *next;
1002 for (; use; use = next)
1004 next = use->next;
1005 free (use);
1009 /* Return pressure class and number of hard registers (through *NREGS)
1010 for destination of INSN. */
1011 static enum reg_class
1012 get_pressure_class_and_nregs (rtx insn, int *nregs)
1014 rtx reg;
1015 enum reg_class pressure_class;
1016 rtx set = single_set (insn);
1018 /* Considered invariant insns have only one set. */
1019 gcc_assert (set != NULL_RTX);
1020 reg = SET_DEST (set);
1021 if (GET_CODE (reg) == SUBREG)
1022 reg = SUBREG_REG (reg);
1023 if (MEM_P (reg))
/* A store does not create register pressure. */
1025 *nregs = 0;
1026 pressure_class = NO_REGS;
1028 else
1030 if (! REG_P (reg))
1031 reg = NULL_RTX;
1032 if (reg == NULL_RTX)
/* Unknown destination: assume the most generic class. */
1033 pressure_class = GENERAL_REGS;
1034 else
1036 pressure_class = reg_allocno_class (REGNO (reg));
1037 pressure_class = ira_pressure_class_translate[pressure_class];
1039 *nregs
1040 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
1042 return pressure_class;
1045 /* Calculates cost and number of registers needed for moving invariant INV
1046 out of the loop and stores them to *COST and *REGS_NEEDED. */
/* When -fira-loop-pressure is off REGS_NEEDED is a single counter in
   slot 0; otherwise it is indexed by IRA pressure class.  Dependencies
   are accumulated recursively; the stamp mechanism prevents counting a
   shared dependency twice within one gain_for_invariant query. */
1048 static void
1049 get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
1051 int i, acomp_cost;
1052 unsigned aregs_needed[N_REG_CLASSES];
1053 unsigned depno;
1054 struct invariant *dep;
1055 bitmap_iterator bi;
1057 /* Find the representative of the class of the equivalent invariants. */
1058 inv = VEC_index (invariant_p, invariants, inv->eqto);
1060 *comp_cost = 0;
1061 if (! flag_ira_loop_pressure)
1062 regs_needed[0] = 0;
1063 else
1065 for (i = 0; i < ira_pressure_classes_num; i++)
1066 regs_needed[ira_pressure_classes[i]] = 0;
/* Already selected for motion, or already counted in this query. */
1069 if (inv->move
1070 || inv->stamp == actual_stamp)
1071 return;
1072 inv->stamp = actual_stamp;
1074 if (! flag_ira_loop_pressure)
1075 regs_needed[0]++;
1076 else
1078 int nregs;
1079 enum reg_class pressure_class;
1081 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1082 regs_needed[pressure_class] += nregs;
/* If the invariant is only ever used as a cheap address, moving it
   saves nothing on the computation side. */
1085 if (!inv->cheap_address
1086 || inv->def->n_addr_uses < inv->def->n_uses)
1087 (*comp_cost) += inv->cost;
1089 #ifdef STACK_REGS
1091 /* Hoisting constant pool constants into stack regs may cost more than
1092 just single register. On x87, the balance is affected both by the
1093 small number of FP registers, and by its register stack organization,
1094 that forces us to add compensation code in and around the loop to
1095 shuffle the operands to the top of stack before use, and pop them
1096 from the stack after the loop finishes.
1098 To model this effect, we increase the number of registers needed for
1099 stack registers by two: one register push, and one register pop.
1100 This usually has the effect that FP constant loads from the constant
1101 pool are not moved out of the loop.
1103 Note that this also means that dependent invariants can not be moved.
1104 However, the primary purpose of this pass is to move loop invariant
1105 address arithmetic out of loops, and address arithmetic that depends
1106 on floating point constants is unlikely to ever occur. */
1107 rtx set = single_set (inv->insn);
1108 if (set
1109 && IS_STACK_MODE (GET_MODE (SET_SRC (set)))
1110 && constant_pool_constant_p (SET_SRC (set)))
1112 if (flag_ira_loop_pressure)
1113 regs_needed[ira_stack_reg_pressure_class] += 2;
1114 else
1115 regs_needed[0] += 2;
1118 #endif
1120 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
1122 bool check_p;
1124 dep = VEC_index (invariant_p, invariants, depno);
1126 get_inv_cost (dep, &acomp_cost, aregs_needed);
/* check_p: the dependency actually needs some registers. */
1128 if (! flag_ira_loop_pressure)
1129 check_p = aregs_needed[0] != 0;
1130 else
1132 for (i = 0; i < ira_pressure_classes_num; i++)
1133 if (aregs_needed[ira_pressure_classes[i]] != 0)
1134 break;
1135 check_p = i < ira_pressure_classes_num;
1137 if (check_p
1138 /* We need to check always_executed, since if the original value of
1139 the invariant may be preserved, we may need to keep it in a
1140 separate register. TODO check whether the register has an
1141 use outside of the loop. */
1142 && dep->always_executed
1143 && !dep->def->uses->next)
1145 /* If this is a single use, after moving the dependency we will not
1146 need a new register. */
1147 if (! flag_ira_loop_pressure)
1148 aregs_needed[0]--;
1149 else
1151 int nregs;
1152 enum reg_class pressure_class;
1154 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1155 aregs_needed[pressure_class] -= nregs;
1159 if (! flag_ira_loop_pressure)
1160 regs_needed[0] += aregs_needed[0];
1161 else
1163 for (i = 0; i < ira_pressure_classes_num; i++)
1164 regs_needed[ira_pressure_classes[i]]
1165 += aregs_needed[ira_pressure_classes[i]];
1167 (*comp_cost) += acomp_cost;
1171 /* Calculates gain for eliminating invariant INV. REGS_USED is the number
1172 of registers used in the loop, NEW_REGS is the number of new variables
1173 already added due to the invariant motion. The number of registers needed
1174 for it is stored in *REGS_NEEDED. SPEED and CALL_P are flags passed
1175 through to estimate_reg_pressure_cost. */
1177 static int
1178 gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
1179 unsigned *new_regs, unsigned regs_used,
1180 bool speed, bool call_p)
1182 int comp_cost, size_cost;
/* New query: invalidate the visited stamps used by get_inv_cost. */
1184 actual_stamp++;
1186 get_inv_cost (inv, &comp_cost, regs_needed);
1188 if (! flag_ira_loop_pressure)
/* Size cost is the marginal pressure cost of the extra registers. */
1190 size_cost = (estimate_reg_pressure_cost (new_regs[0] + regs_needed[0],
1191 regs_used, speed, call_p)
1192 - estimate_reg_pressure_cost (new_regs[0],
1193 regs_used, speed, call_p));
1195 else
1197 int i;
1198 enum reg_class pressure_class;
1200 for (i = 0; i < ira_pressure_classes_num; i++)
1202 pressure_class = ira_pressure_classes[i];
1203 if ((int) new_regs[pressure_class]
1204 + (int) regs_needed[pressure_class]
1205 + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
1206 + IRA_LOOP_RESERVED_REGS
1207 > ira_class_hard_regs_num[pressure_class])
1208 break;
1210 if (i < ira_pressure_classes_num)
1211 /* There will be register pressure excess and we want not to
1212 make this loop invariant motion. All loop invariants with
1213 non-positive gains will be rejected in function
1214 find_invariants_to_move. Therefore we return the negative
1215 number here.
1217 One could think that this rejects also expensive loop
1218 invariant motions and this will hurt code performance.
1219 However numerous experiments with different heuristics
1220 taking invariant cost into account did not confirm this
1221 assumption. There are possible explanations for this
1222 result:
1223 o probably all expensive invariants were already moved out
1224 of the loop by PRE and gimple invariant motion pass.
1225 o expensive invariant execution will be hidden by insn
1226 scheduling or OOO processor hardware because usually such
1227 invariants have a lot of freedom to be executed
1228 out-of-order.
1229 Another reason for ignoring invariant cost vs spilling cost
1230 heuristics is also in difficulties to evaluate accurately
1231 spill cost at this stage. */
1232 return -1;
1233 else
1234 size_cost = 0;
1237 return comp_cost - size_cost;
1240 /* Finds invariant with best gain for moving. Returns the gain, stores
1241 the invariant in *BEST and number of registers needed for it to
1242 *REGS_NEEDED. REGS_USED is the number of registers used in the loop.
1243 NEW_REGS is the number of new variables already added due to invariant
1244 motion. Returns 0 (and leaves *BEST untouched) when no invariant has
a positive gain. */
1246 static int
1247 best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
1248 unsigned *new_regs, unsigned regs_used,
1249 bool speed, bool call_p)
1251 struct invariant *inv;
/* GAIN starts at 0 so only strictly positive gains are ever selected. */
1252 int i, gain = 0, again;
1253 unsigned aregs_needed[N_REG_CLASSES], invno;
1255 FOR_EACH_VEC_ELT (invariant_p, invariants, invno, inv)
/* Skip invariants already chosen for motion. */
1257 if (inv->move)
1258 continue;
1260 /* Only consider the "representatives" of equivalent invariants. */
1261 if (inv->eqto != inv->invno)
1262 continue;
1264 again = gain_for_invariant (inv, aregs_needed, new_regs, regs_used,
1265 speed, call_p);
1266 if (again > gain)
1268 gain = again;
1269 *best = inv;
/* Record the register requirements of the current best candidate. */
1270 if (! flag_ira_loop_pressure)
1271 regs_needed[0] = aregs_needed[0];
1272 else
1274 for (i = 0; i < ira_pressure_classes_num; i++)
1275 regs_needed[ira_pressure_classes[i]]
1276 = aregs_needed[ira_pressure_classes[i]];
1281 return gain;
1284 /* Marks invariant INVNO and all its dependencies for moving. GAIN is
used only for dump output; dependencies are reported with gain -1. */
1286 static void
1287 set_move_mark (unsigned invno, int gain)
1289 struct invariant *inv = VEC_index (invariant_p, invariants, invno);
1290 bitmap_iterator bi;
1292 /* Find the representative of the class of the equivalent invariants. */
1293 inv = VEC_index (invariant_p, invariants, inv->eqto);
/* Already marked -- nothing to do; also terminates the recursion. */
1295 if (inv->move)
1296 return;
1297 inv->move = true;
1299 if (dump_file)
1301 if (gain >= 0)
1302 fprintf (dump_file, "Decided to move invariant %d -- gain %d\n",
1303 invno, gain);
1304 else
1305 fprintf (dump_file, "Decided to move dependent invariant %d\n",
1306 invno);
/* Recursively mark everything this invariant depends on. Note that
   INVNO is reused here as the bitmap iteration variable. */
1309 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, bi)
1311 set_move_mark (invno, -1);
1315 /* Determines which invariants to move. Greedily picks the invariant
with the best gain until no candidate has a positive gain, updating
the running count of new registers after each selection. */
1317 static void
1318 find_invariants_to_move (bool speed, bool call_p)
1320 int gain;
1321 unsigned i, regs_used, regs_needed[N_REG_CLASSES], new_regs[N_REG_CLASSES];
1322 struct invariant *inv = NULL;
/* Nothing to do if no invariants were found. */
1324 if (!VEC_length (invariant_p, invariants))
1325 return;
1327 if (flag_ira_loop_pressure)
1328 /* REGS_USED is actually never used when the flag is on. */
1329 regs_used = 0;
1330 else
1331 /* We do not really do a good job in estimating number of
1332 registers used; we put some initial bound here to stand for
1333 induction variables etc. that we do not detect. */
1335 unsigned int n_regs = DF_REG_SIZE (df);
1337 regs_used = 2;
1339 for (i = 0; i < n_regs; i++)
1341 if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
1343 /* This is a value that is used but not changed inside loop. */
1344 regs_used++;
/* Initialize the per-class (or single-bucket) new-register counters. */
1349 if (! flag_ira_loop_pressure)
1350 new_regs[0] = regs_needed[0] = 0;
1351 else
1353 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1354 new_regs[ira_pressure_classes[i]] = 0;
/* Greedy selection loop: take the best candidate while its gain is
   positive, then account for the registers it will consume. */
1356 while ((gain = best_gain_for_invariant (&inv, regs_needed,
1357 new_regs, regs_used,
1358 speed, call_p)) > 0)
1360 set_move_mark (inv->invno, gain);
1361 if (! flag_ira_loop_pressure)
1362 new_regs[0] += regs_needed[0];
1363 else
1365 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1366 new_regs[ira_pressure_classes[i]]
1367 += regs_needed[ira_pressure_classes[i]];
1372 /* Replace the uses, reached by the definition of invariant INV, by REG.
1374 IN_GROUP is nonzero if this is part of a group of changes that must be
1375 performed as a group. In that case, the changes will be stored. The
1376 function `apply_change_group' will validate and apply the changes.
Returns nonzero on success (always 1 when IN_GROUP). */
1378 static int
1379 replace_uses (struct invariant *inv, rtx reg, bool in_group)
1381 /* Replace the uses we know to be dominated. It saves work for copy
1382 propagation, and also it is necessary so that dependent invariants
1383 are computed right. */
1384 if (inv->def)
1386 struct use *use;
1387 for (use = inv->def->uses; use; use = use->next)
1388 validate_change (use->insn, use->pos, reg, true);
1390 /* If we aren't part of a larger group, apply the changes now. */
1391 if (!in_group)
1392 return apply_change_group ();
1395 return 1;
1398 /* Move invariant INVNO out of the LOOP. Returns true if this succeeds, false
1399 otherwise. Recursive: first moves everything INVNO depends on, and for
a non-representative member of an equivalence class moves the
representative instead and rewires the uses to its register. */
1401 static bool
1402 move_invariant_reg (struct loop *loop, unsigned invno)
1404 struct invariant *inv = VEC_index (invariant_p, invariants, invno);
1405 struct invariant *repr = VEC_index (invariant_p, invariants, inv->eqto);
1406 unsigned i;
1407 basic_block preheader = loop_preheader_edge (loop)->src;
1408 rtx reg, set, dest, note;
1409 bitmap_iterator bi;
1410 int regno = -1;
/* Already moved (reg set) -> success; not selected for motion -> failure. */
1412 if (inv->reg)
1413 return true;
1414 if (!repr->move)
1415 return false;
1417 /* If this is a representative of the class of equivalent invariants,
1418 really move the invariant. Otherwise just replace its use with
1419 the register used for the representative. */
1420 if (inv == repr)
1422 if (inv->depends_on)
1424 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, bi)
1426 if (!move_invariant_reg (loop, i))
1427 goto fail;
1431 /* Move the set out of the loop. If the set is always executed (we could
1432 omit this condition if we know that the register is unused outside of
1433 the loop, but it does not seem worth finding out) and it has no uses
1434 that would not be dominated by it, we may just move it (TODO).
1435 Otherwise we need to create a temporary register. */
1436 set = single_set (inv->insn);
1437 reg = dest = SET_DEST (set);
1438 if (GET_CODE (reg) == SUBREG)
1439 reg = SUBREG_REG (reg);
1440 if (REG_P (reg))
1441 regno = REGNO (reg);
/* Fresh pseudo carrying DEST's mode and attributes. */
1443 reg = gen_reg_rtx_and_attrs (dest);
1445 /* Try replacing the destination by a new pseudoregister. */
1446 validate_change (inv->insn, &SET_DEST (set), reg, true);
1448 /* As well as all the dominated uses. */
1449 replace_uses (inv, reg, true);
1451 /* And validate all the changes. */
1452 if (!apply_change_group ())
1453 goto fail;
/* Copy the new pseudo back into the original destination inside the
   loop, then hoist the computation itself to the preheader. */
1455 emit_insn_after (gen_move_insn (dest, reg), inv->insn);
1456 reorder_insns (inv->insn, inv->insn, BB_END (preheader));
1458 /* If there is a REG_EQUAL note on the insn we just moved, and the
1459 insn is in a basic block that is not always executed or the note
1460 contains something for which we don't know the invariant status,
1461 the note may no longer be valid after we move the insn. Note that
1462 uses in REG_EQUAL notes are taken into account in the computation
1463 of invariants, so it is safe to retain the note even if it contains
1464 register references for which we know the invariant status. */
1465 if ((note = find_reg_note (inv->insn, REG_EQUAL, NULL_RTX))
1466 && (!inv->always_executed
1467 || !check_maybe_invariant (XEXP (note, 0))))
1468 remove_note (inv->insn, note);
1470 else
/* Non-representative: make sure the representative is moved, then
   reuse its register and delete our now-redundant computation. */
1472 if (!move_invariant_reg (loop, repr->invno))
1473 goto fail;
1474 reg = repr->reg;
1475 regno = repr->orig_regno;
1476 if (!replace_uses (inv, reg, false))
1477 goto fail;
1478 set = single_set (inv->insn);
1479 emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
1480 delete_insn (inv->insn);
1483 inv->reg = reg;
1484 inv->orig_regno = regno;
1486 return true;
1488 fail:
1489 /* If we failed, clear move flag, so that we do not try to move inv
1490 again. */
1491 if (dump_file)
1492 fprintf (dump_file, "Failed to move invariant %d\n", invno);
1493 inv->move = false;
1494 inv->reg = NULL_RTX;
1495 inv->orig_regno = -1;
1497 return false;
1500 /* Move the selected invariants out of the LOOP. When IRA loop pressure
1501 is in use, also propagate register class preferences from the original
pseudos to the newly created invariant registers. (The old comment
referred to a TEMPORARY_REGS parameter that no longer exists.) */
1503 static void
1504 move_invariants (struct loop *loop)
1506 struct invariant *inv;
1507 unsigned i;
1509 FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
1510 move_invariant_reg (loop, i);
1511 if (flag_ira_loop_pressure && resize_reg_info ())
1513 FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
1514 if (inv->reg != NULL_RTX)
1516 if (inv->orig_regno >= 0)
/* Inherit class preferences from the pseudo we replaced. */
1517 setup_reg_classes (REGNO (inv->reg),
1518 reg_preferred_class (inv->orig_regno),
1519 reg_alternate_class (inv->orig_regno),
1520 reg_allocno_class (inv->orig_regno))
1521 else
/* No original pseudo to copy from; use generic defaults. */
1522 setup_reg_classes (REGNO (inv->reg),
1523 GENERAL_REGS, NO_REGS, GENERAL_REGS);
1528 /* Initializes invariant motion data: resets the cost stamp and
allocates the vector of invariants for the current loop. */
1530 static void
1531 init_inv_motion_data (void)
1533 actual_stamp = 1;
/* 100 is just an initial allocation hint; the vector grows as needed. */
1535 invariants = VEC_alloc (invariant_p, heap, 100);
1538 /* Frees the data allocated by invariant motion: per-def use lists,
the invariant table entries, and the invariants vector itself. */
1540 static void
1541 free_inv_motion_data (void)
1543 unsigned i;
1544 struct def *def;
1545 struct invariant *inv;
1547 check_invariant_table_size ();
1548 for (i = 0; i < DF_DEFS_TABLE_SIZE (); i++)
1550 inv = invariant_table[i];
1551 if (inv)
1553 def = inv->def;
/* Every table entry must have a definition; the invariant struct
   itself is freed in the vector walk below. */
1554 gcc_assert (def != NULL);
1556 free_use_list (def->uses);
1557 free (def);
1558 invariant_table[i] = NULL;
1562 FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
1564 BITMAP_FREE (inv->depends_on);
1565 free (inv);
1567 VEC_free (invariant_p, heap, invariants);
1570 /* Move the invariants out of the LOOP: find them, decide which are
profitable to move, move them, and release the per-loop data. */
1572 static void
1573 move_single_loop_invariants (struct loop *loop)
1575 init_inv_motion_data ();
1577 find_invariants (loop);
1578 find_invariants_to_move (optimize_loop_for_speed_p (loop),
1579 LOOP_DATA (loop)->has_call);
1580 move_invariants (loop);
1582 free_inv_motion_data ();
1585 /* Releases the auxiliary data for LOOP stored in loop->aux. Safe to
call when no data was ever attached. */
1587 static void
1588 free_loop_data (struct loop *loop)
1590 struct loop_data *data = LOOP_DATA (loop);
1591 if (!data)
1592 return;
1594 bitmap_clear (&LOOP_DATA (loop)->regs_ref);
1595 bitmap_clear (&LOOP_DATA (loop)->regs_live);
1596 free (data);
1597 loop->aux = NULL;
1602 /* Registers currently living. */
1603 static bitmap_head curr_regs_live;
1605 /* Current reg pressure for each pressure class. */
1606 static int curr_reg_pressure[N_REG_CLASSES];
1608 /* Record all regs that are set in any one insn. Communication from
1609 mark_reg_{store,clobber} and global_conflicts. Asm can refer to
1610 all hard-registers. Sized for the worst case: every hard register
or every recognized operand, times two. */
1611 static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
1612 ? FIRST_PSEUDO_REGISTER : MAX_RECOG_OPERANDS) * 2];
1613 /* Number of regs stored in the previous array. */
1614 static int n_regs_set;
1616 /* Return pressure class and number of needed hard registers (through
1617 *NREGS) of register REGNO. Returns NO_REGS with *NREGS == 0 for
hard registers that are unallocatable or eliminable. */
1618 static enum reg_class
1619 get_regno_pressure_class (int regno, int *nregs)
1621 if (regno >= FIRST_PSEUDO_REGISTER)
1623 enum reg_class pressure_class;
/* Pseudo: translate its allocno class to a pressure class and count
   how many hard regs of that class its mode occupies. */
1625 pressure_class = reg_allocno_class (regno);
1626 pressure_class = ira_pressure_class_translate[pressure_class];
1627 *nregs
1628 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
1629 return pressure_class;
/* Allocatable, non-eliminable hard register: counts as one reg of
   its own class's pressure class. */
1631 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
1632 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
1634 *nregs = 1;
1635 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
1637 else
1639 *nregs = 0;
1640 return NO_REGS;
1644 /* Increase (if INCR_P) or decrease current register pressure for
1645 register REGNO, and keep the current loop's recorded maximum
pressure up to date on increases. */
1646 static void
1647 change_pressure (int regno, bool incr_p)
1649 int nregs;
1650 enum reg_class pressure_class;
1652 pressure_class = get_regno_pressure_class (regno, &nregs);
1653 if (! incr_p)
1654 curr_reg_pressure[pressure_class] -= nregs;
1655 else
1657 curr_reg_pressure[pressure_class] += nregs;
/* Track the high-water mark for the enclosing loop. */
1658 if (LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
1659 < curr_reg_pressure[pressure_class])
1660 LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
1661 = curr_reg_pressure[pressure_class];
1665 /* Mark REGNO birth: record it live in every enclosing loop and bump
pressure, but only if it was not live already. */
1666 static void
1667 mark_regno_live (int regno)
1669 struct loop *loop;
1671 for (loop = curr_loop;
1672 loop != current_loops->tree_root;
1673 loop = loop_outer (loop))
1674 bitmap_set_bit (&LOOP_DATA (loop)->regs_live, regno);
/* bitmap_set_bit returns false if the bit was already set; avoid
   double-counting pressure for an already-live register. */
1675 if (!bitmap_set_bit (&curr_regs_live, regno))
1676 return;
1677 change_pressure (regno, true);
1680 /* Mark REGNO death: drop pressure only if it was actually live. */
1681 static void
1682 mark_regno_death (int regno)
1684 if (! bitmap_clear_bit (&curr_regs_live, regno))
1685 return;
1686 change_pressure (regno, false);
1689 /* Mark setting register REG: note_stores callback. Records REG in
regs_set and marks each covered regno live (a hard register may
span several regnos depending on its mode). */
1690 static void
1691 mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
1692 void *data ATTRIBUTE_UNUSED)
1694 int regno;
1696 if (GET_CODE (reg) == SUBREG)
1697 reg = SUBREG_REG (reg);
1699 if (! REG_P (reg))
1700 return;
1702 regs_set[n_regs_set++] = reg;
1704 regno = REGNO (reg);
1706 if (regno >= FIRST_PSEUDO_REGISTER)
1707 mark_regno_live (regno);
1708 else
/* Hard register: mark every hard regno the value occupies. */
1710 int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
1712 while (regno < last)
1714 mark_regno_live (regno);
1715 regno++;
1720 /* Mark clobbering register REG: note_stores callback that forwards
only CLOBBERs to mark_reg_store, ignoring plain SETs. */
1721 static void
1722 mark_reg_clobber (rtx reg, const_rtx setter, void *data)
1724 if (GET_CODE (setter) == CLOBBER)
1725 mark_reg_store (reg, setter, data);
1728 /* Mark register REG death, covering every hard regno a multi-word
hard register occupies. */
1729 static void
1730 mark_reg_death (rtx reg)
1732 int regno = REGNO (reg);
1734 if (regno >= FIRST_PSEUDO_REGISTER)
1735 mark_regno_death (regno);
1736 else
1738 int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
1740 while (regno < last)
1742 mark_regno_death (regno);
1743 regno++;
1748 /* Mark occurrence of registers in X for the current loop: walk the
rtx recursively and record every REG found in the regs_ref bitmap
of the current loop and all its ancestors. */
1749 static void
1750 mark_ref_regs (rtx x)
1752 RTX_CODE code;
1753 int i;
1754 const char *fmt;
1756 if (!x)
1757 return;
1759 code = GET_CODE (x);
1760 if (code == REG)
1762 struct loop *loop;
1764 for (loop = curr_loop;
1765 loop != current_loops->tree_root;
1766 loop = loop_outer (loop))
1767 bitmap_set_bit (&LOOP_DATA (loop)->regs_ref, REGNO (x));
1768 return;
/* Standard rtx walk: recurse into 'e' (expression) and 'E' (vector)
   operands. */
1771 fmt = GET_RTX_FORMAT (code);
1772 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1773 if (fmt[i] == 'e')
1774 mark_ref_regs (XEXP (x, i));
1775 else if (fmt[i] == 'E')
1777 int j;
1779 for (j = 0; j < XVECLEN (x, i); j++)
1780 mark_ref_regs (XVECEXP (x, i, j));
1784 /* Calculate register pressure in the loops. Walks every basic block,
simulates liveness insn by insn (births via note_stores, deaths via
REG_DEAD/REG_UNUSED notes) and records the maximum pressure per
pressure class in each loop's LOOP_DATA. */
1785 static void
1786 calculate_loop_reg_pressure (void)
1788 int i;
1789 unsigned int j;
1790 bitmap_iterator bi;
1791 basic_block bb;
1792 rtx insn, link;
1793 struct loop *loop, *parent;
1794 loop_iterator li;
/* Attach fresh loop_data to every loop that does not have one yet. */
1796 FOR_EACH_LOOP (li, loop, 0)
1797 if (loop->aux == NULL)
1799 loop->aux = xcalloc (1, sizeof (struct loop_data));
1800 bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
1801 bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
1803 ira_setup_eliminable_regset ();
1804 bitmap_initialize (&curr_regs_live, &reg_obstack);
1805 FOR_EACH_BB (bb)
1807 curr_loop = bb->loop_father;
/* Blocks outside any real loop contribute no loop pressure. */
1808 if (curr_loop == current_loops->tree_root)
1809 continue;
1811 for (loop = curr_loop;
1812 loop != current_loops->tree_root;
1813 loop = loop_outer (loop))
1814 bitmap_ior_into (&LOOP_DATA (loop)->regs_live, DF_LR_IN (bb));
/* Seed the simulation with the DF live-in set of the block. */
1816 bitmap_copy (&curr_regs_live, DF_LR_IN (bb));
1817 for (i = 0; i < ira_pressure_classes_num; i++)
1818 curr_reg_pressure[ira_pressure_classes[i]] = 0;
1819 EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live, 0, j, bi)
1820 change_pressure (j, true);
1822 FOR_BB_INSNS (bb, insn)
1824 if (! NONDEBUG_INSN_P (insn))
1825 continue;
1827 mark_ref_regs (PATTERN (insn));
1828 n_regs_set = 0;
1829 note_stores (PATTERN (insn), mark_reg_clobber, NULL);
1831 /* Mark any registers dead after INSN as dead now. */
1833 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1834 if (REG_NOTE_KIND (link) == REG_DEAD)
1835 mark_reg_death (XEXP (link, 0));
1837 /* Mark any registers set in INSN as live,
1838 and mark them as conflicting with all other live regs.
1839 Clobbers are processed again, so they conflict with
1840 the registers that are set. */
1842 note_stores (PATTERN (insn), mark_reg_store, NULL);
1844 #ifdef AUTO_INC_DEC
1845 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1846 if (REG_NOTE_KIND (link) == REG_INC)
1847 mark_reg_store (XEXP (link, 0), NULL_RTX, NULL);
1848 #endif
/* Registers set but marked REG_UNUSED die immediately after INSN. */
1849 while (n_regs_set-- > 0)
1851 rtx note = find_regno_note (insn, REG_UNUSED,
1852 REGNO (regs_set[n_regs_set]));
1853 if (! note)
1854 continue;
1856 mark_reg_death (XEXP (note, 0));
1860 bitmap_clear (&curr_regs_live);
/* In MIXED/ALL IRA region modes, discount registers that are live
   through a loop but never referenced inside it. */
1861 if (flag_ira_region == IRA_REGION_MIXED
1862 || flag_ira_region == IRA_REGION_ALL)
1863 FOR_EACH_LOOP (li, loop, 0)
1865 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
1866 if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
1868 enum reg_class pressure_class;
1869 int nregs;
1871 pressure_class = get_regno_pressure_class (j, &nregs);
1872 LOOP_DATA (loop)->max_reg_pressure[pressure_class] -= nregs;
/* The rest is dump output only. */
1875 if (dump_file == NULL)
1876 return;
1877 FOR_EACH_LOOP (li, loop, 0)
1879 parent = loop_outer (loop);
1880 fprintf (dump_file, "\n Loop %d (parent %d, header bb%d, depth %d)\n",
1881 loop->num, (parent == NULL ? -1 : parent->num),
1882 loop->header->index, loop_depth (loop));
1883 fprintf (dump_file, "\n ref. regnos:");
1884 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_ref, 0, j, bi)
1885 fprintf (dump_file, " %d", j);
1886 fprintf (dump_file, "\n live regnos:");
1887 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
1888 fprintf (dump_file, " %d", j);
1889 fprintf (dump_file, "\n Pressure:");
1890 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1892 enum reg_class pressure_class;
1894 pressure_class = ira_pressure_classes[i];
1895 if (LOOP_DATA (loop)->max_reg_pressure[pressure_class] == 0)
1896 continue;
1897 fprintf (dump_file, " %s=%d", reg_class_names[pressure_class],
1898 LOOP_DATA (loop)->max_reg_pressure[pressure_class]);
1900 fprintf (dump_file, "\n");
1906 /* Move the invariants out of the loops. Entry point of the pass:
optionally computes IRA loop register pressure, then processes the
loops innermost first and releases all auxiliary data. */
1908 void
1909 move_loop_invariants (void)
1911 struct loop *loop;
1912 loop_iterator li;
1914 if (flag_ira_loop_pressure)
1916 df_analyze ();
1917 regstat_init_n_sets_and_refs ();
1918 ira_set_pseudo_classes (dump_file);
1919 calculate_loop_reg_pressure ();
1920 regstat_free_n_sets_and_refs ();
1922 df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
1923 /* Process the loops, innermost first. */
1924 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1926 curr_loop = loop;
1927 /* move_single_loop_invariants for very large loops
1928 is time consuming and might need a lot of memory. */
1929 if (loop->num_nodes <= (unsigned) LOOP_INVARIANT_MAX_BBS_IN_LOOP)
1930 move_single_loop_invariants (loop);
1933 FOR_EACH_LOOP (li, loop, 0)
1935 free_loop_data (loop);
1938 if (flag_ira_loop_pressure)
1939 /* There is no sense to keep this info because it was most
1940 probably outdated by subsequent passes. */
1941 free_reg_info ();
1942 free (invariant_table);
1943 invariant_table = NULL;
1944 invariant_table_size = 0;
1946 #ifdef ENABLE_CHECKING
1947 verify_flow_info ();
1948 #endif