/* RTL-level loop invariant motion.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This implements the loop invariant motion pass.  It is very simple
   (no calls, no loads/stores, etc.).  This should be sufficient to clean up
   things like address arithmetic -- other more complicated invariants should
   be eliminated on GIMPLE either in tree-ssa-loop-im.c or in tree-ssa-pre.c.

   We proceed loop by loop -- it is simpler than trying to handle things
   globally and should not lose much.  First we inspect all sets inside the
   loop and create a dependency graph on insns (saying "to move this insn,
   you must also move the following insns").

   We then need to determine what to move.  We estimate the number of
   registers used and move as many invariants as possible while we still
   have enough free registers.  We prefer the expensive invariants.

   Then we move the selected invariants out of the loop, creating new
   temporaries for them if necessary.  */
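
/* For illustration: at the source level the transformation performed by
   this pass corresponds to hoisting a loop-invariant computation into the
   loop preheader, e.g.

     for (i = 0; i < n; i++)           t = x + y;
       a[i] = x + y;             =>    for (i = 0; i < n; i++)
                                         a[i] = t;

   assuming x and y are not modified inside the loop; here the same is done
   on RTL insns rather than on source statements.  */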
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cfgrtl.h"
#include "cfgloop.h"
#include "expr.h"
#include "params.h"
#include "rtl-iter.h"
#include "dumpfile.h"

/* The data stored for the loop.  */

struct loop_data
{
  struct loop *outermost_exit;	/* The outermost exit of the loop.  */
  bool has_call;		/* True if the loop contains a call.  */
  /* Maximal register pressure inside loop for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Loop regs referenced and live pseudo-registers.  */
  bitmap_head regs_ref;
  bitmap_head regs_live;
};

#define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)

/* The description of a use.  */

struct use
{
  rtx *pos;			/* Position of the use.  */
  rtx_insn *insn;		/* The insn in which the use occurs.  */
  unsigned addr_use_p;		/* Whether the use occurs in an address.  */
  struct use *next;		/* Next use in the list.  */
};

/* The description of a def.  */

struct def
{
  struct use *uses;		/* The list of uses that are uniquely reached
				   by it.  */
  unsigned n_uses;		/* Number of such uses.  */
  unsigned n_addr_uses;		/* Number of uses in addresses.  */
  unsigned invno;		/* The corresponding invariant.  */
  bool can_prop_to_addr_uses;	/* True if the corresponding inv can be
				   propagated into its address uses.  */
};
/* The data stored for each invariant.  */

struct invariant
{
  /* The number of the invariant.  */
  unsigned invno;

  /* The number of the invariant with the same value.  */
  unsigned eqto;

  /* The number of invariants which eqto this.  */
  unsigned eqno;

  /* If we moved the invariant out of the loop, the register that contains its
     value.  */
  rtx reg;

  /* If we moved the invariant out of the loop, the original regno
     that contained its value.  */
  int orig_regno;

  /* The definition of the invariant.  */
  struct def *def;

  /* The insn in which it is defined.  */
  rtx_insn *insn;

  /* Whether it is always executed.  */
  bool always_executed;

  /* Whether to move the invariant.  */
  bool move;

  /* Whether the invariant is cheap when used as an address.  */
  bool cheap_address;

  /* Cost of the invariant.  */
  unsigned cost;

  /* The invariants it depends on.  */
  bitmap depends_on;

  /* Used for detecting already visited invariants while determining
     costs of movements.  */
  unsigned stamp;
};

/* Currently processed loop.  */
static struct loop *curr_loop;

/* Table of invariants indexed by the df_ref uid field.  */

static unsigned int invariant_table_size = 0;
static struct invariant ** invariant_table;

/* Entry for hash table of invariant expressions.  */

struct invariant_expr_entry
{
  /* The invariant.  */
  struct invariant *inv;

  /* Its value.  */
  rtx expr;

  /* Its mode.  */
  machine_mode mode;

  /* Its hash.  */
  hashval_t hash;
};

/* The actual stamp for marking already visited invariants while determining
   costs of movements.  */

static unsigned actual_stamp;

typedef struct invariant *invariant_p;


/* The invariants.  */

static vec<invariant_p> invariants;
/* Check the size of the invariant table and realloc if necessary.  */

static void
check_invariant_table_size (void)
{
  if (invariant_table_size < DF_DEFS_TABLE_SIZE ())
    {
      unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
      invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
      memset (&invariant_table[invariant_table_size], 0,
	      (new_size - invariant_table_size) * sizeof (struct invariant *));
      invariant_table_size = new_size;
    }
}
/* Test whether it is possible for X to be invariant.  */

static bool
check_maybe_invariant (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      return true;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
    case CALL:
      return false;

    case REG:
      return true;

    case MEM:
      /* Load/store motion is done elsewhere.  ??? Perhaps also add it here?
	 It should not be hard, and might be faster than "elsewhere".  */

      /* Just handle the most trivial case where we load from an unchanging
	 location (most importantly, pic tables).  */
      if (MEM_READONLY_P (x) && !MEM_VOLATILE_P (x))
	break;

      return false;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
	return false;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (!check_maybe_invariant (XEXP (x, i)))
	    return false;
	}
      else if (fmt[i] == 'E')
	{
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (!check_maybe_invariant (XVECEXP (x, i, j)))
	      return false;
	}
    }

  return true;
}
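
/* For illustration: the walk above accepts, e.g., (plus (reg 100)
   (const_int 4)) or a load from a MEM_READONLY_P location such as a PIC
   table, while anything containing a CALL, a volatile ASM_OPERANDS or an
   UNSPEC_VOLATILE is rejected outright.  */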
/* Returns the invariant definition for USE, or NULL if USE is not
   invariant.  */

static struct invariant *
invariant_for_use (df_ref use)
{
  struct df_link *defs;
  df_ref def;
  basic_block bb = DF_REF_BB (use), def_bb;

  if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
    return NULL;

  defs = DF_REF_CHAIN (use);
  if (!defs || defs->next)
    return NULL;
  def = defs->ref;
  check_invariant_table_size ();
  if (!invariant_table[DF_REF_ID (def)])
    return NULL;

  def_bb = DF_REF_BB (def);
  if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
    return NULL;
  return invariant_table[DF_REF_ID (def)];
}
/* Computes hash value for invariant expression X in INSN.  */

static hashval_t
hash_invariant_expr_1 (rtx_insn *insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  hashval_t val = code;
  int do_not_record_p;
  df_ref use;
  struct invariant *inv;

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);

    case REG:
      use = df_find_use (insn, x);
      if (!use)
	return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
      inv = invariant_for_use (use);
      if (!inv)
	return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);

      gcc_assert (inv->eqto != ~0u);
      return inv->eqto;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	val ^= hash_invariant_expr_1 (insn, XEXP (x, i));
      else if (fmt[i] == 'E')
	{
	  for (j = 0; j < XVECLEN (x, i); j++)
	    val ^= hash_invariant_expr_1 (insn, XVECEXP (x, i, j));
	}
      else if (fmt[i] == 'i' || fmt[i] == 'n')
	val ^= XINT (x, i);
    }

  return val;
}
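
/* For illustration: a REG whose single reaching def is a known invariant
   hashes to the number of that invariant's equivalence class (inv->eqto)
   instead of to the register itself.  Hence, e.g., (plus (reg 100)
   (const_int 8)) and (plus (reg 101) (const_int 8)) hash identically
   whenever regs 100 and 101 are set by equivalent invariants, which is
   what allows merge_identical_invariants to find them in one table.  */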
/* Returns true if the invariant expressions E1 and E2 used in insns INSN1
   and INSN2 always have the same value.  */

static bool
invariant_expr_equal_p (rtx_insn *insn1, rtx e1, rtx_insn *insn2, rtx e2)
{
  enum rtx_code code = GET_CODE (e1);
  int i, j;
  const char *fmt;
  df_ref use1, use2;
  struct invariant *inv1 = NULL, *inv2 = NULL;
  rtx sub1, sub2;

  /* If mode of only one of the operands is VOIDmode, it is not equivalent to
     the other one.  If both are VOIDmode, we rely on the caller of this
     function to verify that their modes are the same.  */
  if (code != GET_CODE (e2) || GET_MODE (e1) != GET_MODE (e2))
    return false;

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      return rtx_equal_p (e1, e2);

    case REG:
      use1 = df_find_use (insn1, e1);
      use2 = df_find_use (insn2, e2);
      if (use1)
	inv1 = invariant_for_use (use1);
      if (use2)
	inv2 = invariant_for_use (use2);

      if (!inv1 && !inv2)
	return rtx_equal_p (e1, e2);

      if (!inv1 || !inv2)
	return false;

      gcc_assert (inv1->eqto != ~0u);
      gcc_assert (inv2->eqto != ~0u);
      return inv1->eqto == inv2->eqto;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  sub1 = XEXP (e1, i);
	  sub2 = XEXP (e2, i);

	  if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
	    return false;
	}
      else if (fmt[i] == 'E')
	{
	  if (XVECLEN (e1, i) != XVECLEN (e2, i))
	    return false;

	  for (j = 0; j < XVECLEN (e1, i); j++)
	    {
	      sub1 = XVECEXP (e1, i, j);
	      sub2 = XVECEXP (e2, i, j);

	      if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
		return false;
	    }
	}
      else if (fmt[i] == 'i' || fmt[i] == 'n')
	{
	  if (XINT (e1, i) != XINT (e2, i))
	    return false;
	}
      /* Unhandled type of subexpression, we fail conservatively.  */
      else
	return false;
    }

  return true;
}
struct invariant_expr_hasher : free_ptr_hash <invariant_expr_entry>
{
  static inline hashval_t hash (const invariant_expr_entry *);
  static inline bool equal (const invariant_expr_entry *,
			    const invariant_expr_entry *);
};

/* Returns hash value for invariant expression entry ENTRY.  */

inline hashval_t
invariant_expr_hasher::hash (const invariant_expr_entry *entry)
{
  return entry->hash;
}

/* Compares invariant expression entries ENTRY1 and ENTRY2.  */

inline bool
invariant_expr_hasher::equal (const invariant_expr_entry *entry1,
			      const invariant_expr_entry *entry2)
{
  if (entry1->mode != entry2->mode)
    return 0;

  return invariant_expr_equal_p (entry1->inv->insn, entry1->expr,
				 entry2->inv->insn, entry2->expr);
}

typedef hash_table<invariant_expr_hasher> invariant_htab_type;
/* Checks whether invariant with value EXPR in machine mode MODE is
   recorded in EQ.  If this is the case, return the invariant.  Otherwise
   insert INV into the table for this expression and return INV.  */

static struct invariant *
find_or_insert_inv (invariant_htab_type *eq, rtx expr, machine_mode mode,
		    struct invariant *inv)
{
  hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
  struct invariant_expr_entry *entry;
  struct invariant_expr_entry pentry;
  invariant_expr_entry **slot;

  pentry.expr = expr;
  pentry.inv = inv;
  pentry.mode = mode;
  slot = eq->find_slot_with_hash (&pentry, hash, INSERT);
  entry = *slot;

  if (entry)
    return entry->inv;

  entry = XNEW (struct invariant_expr_entry);
  entry->inv = inv;
  entry->expr = expr;
  entry->mode = mode;
  entry->hash = hash;
  *slot = entry;

  return inv;
}
/* Finds invariants identical to INV and records the equivalence.  EQ is the
   hash table of the invariants.  */

static void
find_identical_invariants (invariant_htab_type *eq, struct invariant *inv)
{
  unsigned depno;
  bitmap_iterator bi;
  struct invariant *dep;
  rtx expr, set;
  machine_mode mode;
  struct invariant *tmp;

  if (inv->eqto != ~0u)
    return;

  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
    {
      dep = invariants[depno];
      find_identical_invariants (eq, dep);
    }

  set = single_set (inv->insn);
  expr = SET_SRC (set);
  mode = GET_MODE (expr);
  if (mode == VOIDmode)
    mode = GET_MODE (SET_DEST (set));

  tmp = find_or_insert_inv (eq, expr, mode, inv);
  inv->eqto = tmp->invno;

  if (tmp->invno != inv->invno && inv->always_executed)
    tmp->eqno++;

  if (dump_file && inv->eqto != inv->invno)
    fprintf (dump_file,
	     "Invariant %d is equivalent to invariant %d.\n",
	     inv->invno, inv->eqto);
}

/* Find invariants with the same value and record the equivalences.  */

static void
merge_identical_invariants (void)
{
  unsigned i;
  struct invariant *inv;
  invariant_htab_type eq (invariants.length ());

  FOR_EACH_VEC_ELT (invariants, i, inv)
    find_identical_invariants (&eq, inv);
}
/* Determines the basic blocks inside LOOP that are always executed and
   stores their bitmap to ALWAYS_REACHED.  MAY_EXIT is a bitmap of
   basic blocks that may either exit the loop, or contain a call that
   does not have to return.  BODY is the body of the loop obtained by
   get_loop_body_in_dom_order.  */

static void
compute_always_reached (struct loop *loop, basic_block *body,
			bitmap may_exit, bitmap always_reached)
{
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    {
      if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i]))
	bitmap_set_bit (always_reached, i);

      if (bitmap_bit_p (may_exit, i))
	return;
    }
}
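
/* For illustration: since BODY is in dominance order, the scan above marks
   the blocks that dominate the latch (and thus execute on every iteration)
   and stops at the first block that may leave the loop.  In a loop
   header -> test -> body -> latch where only "test" has an exit edge, just
   "header" and "test" end up in ALWAYS_REACHED.  */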
/* Finds exits out of the LOOP with body BODY.  Marks the blocks from which
   we may exit the loop by a cfg edge in HAS_EXIT and MAY_EXIT.  In MAY_EXIT
   additionally mark blocks that may exit due to a call.  */

static void
find_exits (struct loop *loop, basic_block *body,
	    bitmap may_exit, bitmap has_exit)
{
  unsigned i;
  edge_iterator ei;
  edge e;
  struct loop *outermost_exit = loop, *aexit;
  bool has_call = false;
  rtx_insn *insn;

  for (i = 0; i < loop->num_nodes; i++)
    {
      if (body[i]->loop_father == loop)
	{
	  FOR_BB_INSNS (body[i], insn)
	    {
	      if (CALL_P (insn)
		  && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
		      || !RTL_CONST_OR_PURE_CALL_P (insn)))
		{
		  has_call = true;
		  bitmap_set_bit (may_exit, i);
		  break;
		}
	    }

	  FOR_EACH_EDGE (e, ei, body[i]->succs)
	    {
	      if (! flow_bb_inside_loop_p (loop, e->dest))
		{
		  bitmap_set_bit (may_exit, i);
		  bitmap_set_bit (has_exit, i);
		  outermost_exit = find_common_loop (outermost_exit,
						     e->dest->loop_father);
		}
	      /* If we enter a subloop that might never terminate, treat
		 it like a possible exit.  */
	      if (flow_loop_nested_p (loop, e->dest->loop_father))
		bitmap_set_bit (may_exit, i);
	    }
	  continue;
	}

      /* Use the data stored for the subloop to decide whether we may exit
	 through it.  It is sufficient to do this for the header of the loop,
	 as other basic blocks inside it must be dominated by it.  */
      if (body[i]->loop_father->header != body[i])
	continue;

      if (LOOP_DATA (body[i]->loop_father)->has_call)
	{
	  has_call = true;
	  bitmap_set_bit (may_exit, i);
	}
      aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit;
      if (aexit != loop)
	{
	  bitmap_set_bit (may_exit, i);
	  bitmap_set_bit (has_exit, i);

	  if (flow_loop_nested_p (aexit, outermost_exit))
	    outermost_exit = aexit;
	}
    }

  if (loop->aux == NULL)
    {
      loop->aux = xcalloc (1, sizeof (struct loop_data));
      bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
      bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
    }
  LOOP_DATA (loop)->outermost_exit = outermost_exit;
  LOOP_DATA (loop)->has_call = has_call;
}
/* Check whether we may assign a value to X from a register.  */

static bool
may_assign_reg_p (rtx x)
{
  return (GET_MODE (x) != VOIDmode
	  && GET_MODE (x) != BLKmode
	  && can_copy_p (GET_MODE (x))
	  && (!REG_P (x)
	      || !HARD_REGISTER_P (x)
	      || REGNO_REG_CLASS (REGNO (x)) != NO_REGS));
}

/* Finds definitions that may correspond to invariants in LOOP.  */

static void
find_defs (struct loop *loop)
{
  if (dump_file)
    {
      fprintf (dump_file,
	       "*****starting processing of loop %d ******\n",
	       loop->num);
    }

  df_remove_problem (df_chain);
  df_process_deferred_rescans ();
  df_chain_add_problem (DF_UD_CHAIN);
  df_live_add_problem ();
  df_live_set_all_dirty ();
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_analyze_loop (loop);
  check_invariant_table_size ();

  if (dump_file)
    {
      df_dump_region (dump_file);
      fprintf (dump_file,
	       "*****ending processing of loop %d ******\n",
	       loop->num);
    }
}
/* Creates a new invariant for definition DEF in INSN, depending on invariants
   in DEPENDS_ON.  ALWAYS_EXECUTED is true if the insn is always executed,
   unless the program ends due to a function call.  The newly created invariant
   is returned.  */

static struct invariant *
create_new_invariant (struct def *def, rtx_insn *insn, bitmap depends_on,
		      bool always_executed)
{
  struct invariant *inv = XNEW (struct invariant);
  rtx set = single_set (insn);
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));

  inv->def = def;
  inv->always_executed = always_executed;
  inv->depends_on = depends_on;

  /* If the set is simple, usually by moving it we move the whole store out of
     the loop.  Otherwise we save only the cost of the computation.  */
  if (def)
    {
      inv->cost = set_rtx_cost (set, speed);
      /* ??? Try to determine cheapness of address computation.  Unfortunately
	 the address cost is only a relative measure, we can't really compare
	 it with any absolute number, but only with other address costs.
	 But here we don't have any other addresses, so compare with a magic
	 number anyway.  It has to be large enough to not regress PR33928
	 (by avoiding to move reg+8,reg+16,reg+24 invariants), but small
	 enough to not regress 410.bwaves either (by still moving reg+reg
	 invariants).
	 See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html .  */
      if (SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set))))
	inv->cheap_address = address_cost (SET_SRC (set), word_mode,
					   ADDR_SPACE_GENERIC, speed) < 3;
      else
	inv->cheap_address = false;
    }
  else
    {
      inv->cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)),
				speed);
      inv->cheap_address = false;
    }

  inv->move = false;
  inv->reg = NULL_RTX;
  inv->orig_regno = -1;
  inv->stamp = 0;
  inv->insn = insn;

  inv->invno = invariants.length ();
  inv->eqto = ~0u;

  /* Itself.  */
  inv->eqno = 1;

  if (def)
    def->invno = inv->invno;
  invariants.safe_push (inv);

  if (dump_file)
    {
      fprintf (dump_file,
	       "Set in insn %d is invariant (%d), cost %d, depends on ",
	       INSN_UID (insn), inv->invno, inv->cost);
      dump_bitmap (dump_file, inv->depends_on);
    }

  return inv;
}
/* Canonicalize X for use in an address so that all multiplications are
   represented as MULT instead of multiplication by a power of 2 being
   represented as ASHIFT.

   Callers should prepare a copy of X because this function modifies it
   in place.  */

static void
canonicalize_address_mult (rtx x)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx sub = *iter;

      if (GET_CODE (sub) == ASHIFT
	  && CONST_INT_P (XEXP (sub, 1))
	  && INTVAL (XEXP (sub, 1)) < GET_MODE_BITSIZE (GET_MODE (sub))
	  && INTVAL (XEXP (sub, 1)) >= 0)
	{
	  HOST_WIDE_INT shift = INTVAL (XEXP (sub, 1));
	  PUT_CODE (sub, MULT);
	  XEXP (sub, 1) = gen_int_mode ((HOST_WIDE_INT) 1 << shift,
					GET_MODE (sub));
	  iter.skip_subrtxes ();
	}
    }
}
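
/* For illustration: a left shift by a constant is rewritten into the
   equivalent multiplication, e.g.

     (ashift (reg 100) (const_int 2)) => (mult (reg 100) (const_int 4))

   so that later comparisons and simplifications see a single canonical
   form of the multiplication.  */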
/* Maximum number of sub expressions in address.  We set it to
   a small integer since it's unlikely to have a complicated
   address expression.  */

#define MAX_CANON_ADDR_PARTS (5)

/* Collect sub expressions in address X with PLUS as the separator.
   Sub expressions are stored in vector ADDR_PARTS.  */

static void
collect_address_parts (rtx x, vec<rtx> *addr_parts)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx sub = *iter;

      if (GET_CODE (sub) != PLUS)
	{
	  addr_parts->safe_push (sub);
	  iter.skip_subrtxes ();
	}
    }
}

/* Compare function for sorting sub expressions X and Y based on
   precedence defined for commutative operations.  */

static int
compare_address_parts (const void *x, const void *y)
{
  const rtx *rx = (const rtx *)x;
  const rtx *ry = (const rtx *)y;
  int px = commutative_operand_precedence (*rx);
  int py = commutative_operand_precedence (*ry);

  return (py - px);
}
/* Return a canonical version of address X by the following steps:
   1) Rewrite ASHIFT into MULT recursively.
   2) Divide the address into sub expressions with PLUS as the
      separator.
   3) Sort the sub expressions according to the precedence defined
      for commutative operations.
   4) Fold all CONST_INT_P sub expressions into one if possible.
   5) Create the new canonicalized address and return it.
   Callers should prepare a copy of X because this function may
   modify it in place.  */

static rtx
canonicalize_address (rtx x)
{
  rtx res;
  unsigned int i, j;
  machine_mode mode = GET_MODE (x);
  auto_vec<rtx, MAX_CANON_ADDR_PARTS> addr_parts;

  /* Rewrite ASHIFT into MULT.  */
  canonicalize_address_mult (x);
  /* Divide address into sub expressions.  */
  collect_address_parts (x, &addr_parts);
  /* Unlikely to have very complicated address.  */
  if (addr_parts.length () < 2
      || addr_parts.length () > MAX_CANON_ADDR_PARTS)
    return x;

  /* Sort sub expressions according to canonicalization precedence.  */
  addr_parts.qsort (compare_address_parts);

  /* Fold all CONST_INT_P sub expressions into one if possible.  */
  for (i = 0; i < addr_parts.length (); i++)
    if (CONST_INT_P (addr_parts[i]))
      break;

  for (j = i + 1; j < addr_parts.length (); j++)
    {
      gcc_assert (CONST_INT_P (addr_parts[j]));
      addr_parts[i] = simplify_gen_binary (PLUS, mode,
					   addr_parts[i],
					   addr_parts[j]);
    }

  /* Chain PLUS operators to the left for !CONST_INT_P sub expressions.  */
  res = addr_parts[0];
  for (j = 1; j < i; j++)
    res = simplify_gen_binary (PLUS, mode, res, addr_parts[j]);

  /* Pick up the last CONST_INT_P sub expression.  */
  if (i < addr_parts.length ())
    res = simplify_gen_binary (PLUS, mode, res, addr_parts[i]);

  return res;
}
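
/* A worked example of the steps above, assuming SImode operands:

     (plus (ashift (reg 100) (const_int 2))
           (plus (reg 101) (const_int 8)))

   becomes

     (plus (plus (mult (reg 100) (const_int 4)) (reg 101))
           (const_int 8))

   i.e. the shift is rewritten as MULT, the parts are sorted so that the
   most complex operand comes first and the constants last, and the PLUS
   chain is re-associated to the left.  */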
/* Given invariant DEF and its address USE, check if the corresponding
   invariant expr can be propagated into the use or not.  */

static bool
inv_can_prop_to_addr_use (struct def *def, df_ref use)
{
  struct invariant *inv;
  rtx *pos = DF_REF_REAL_LOC (use), def_set, use_set;
  rtx_insn *use_insn = DF_REF_INSN (use);
  rtx_insn *def_insn;
  bool ok;

  inv = invariants[def->invno];
  /* No need to check if the address expression is expensive.  */
  if (!inv->cheap_address)
    return false;

  def_insn = inv->insn;
  def_set = single_set (def_insn);
  if (!def_set)
    return false;

  validate_unshare_change (use_insn, pos, SET_SRC (def_set), true);
  ok = verify_changes (0);
  /* Try harder with canonicalization of the address expression.  */
  if (!ok && (use_set = single_set (use_insn)) != NULL_RTX)
    {
      rtx src, dest, mem = NULL_RTX;

      src = SET_SRC (use_set);
      dest = SET_DEST (use_set);
      if (MEM_P (src))
	mem = src;
      else if (MEM_P (dest))
	mem = dest;

      if (mem != NULL_RTX
	  && !memory_address_addr_space_p (GET_MODE (mem),
					   XEXP (mem, 0),
					   MEM_ADDR_SPACE (mem)))
	{
	  rtx addr = canonicalize_address (copy_rtx (XEXP (mem, 0)));
	  if (memory_address_addr_space_p (GET_MODE (mem),
					   addr, MEM_ADDR_SPACE (mem)))
	    ok = true;
	}
    }
  cancel_changes (0);
  return ok;
}
/* Record USE at DEF.  */

static void
record_use (struct def *def, df_ref use)
{
  struct use *u = XNEW (struct use);

  u->pos = DF_REF_REAL_LOC (use);
  u->insn = DF_REF_INSN (use);
  u->addr_use_p = (DF_REF_TYPE (use) == DF_REF_REG_MEM_LOAD
		   || DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE);
  u->next = def->uses;
  def->uses = u;
  def->n_uses++;
  if (u->addr_use_p)
    {
      /* Initialize propagation information if this is the first addr
	 use of the inv def.  */
      if (def->n_addr_uses == 0)
	def->can_prop_to_addr_uses = true;

      def->n_addr_uses++;
      if (def->can_prop_to_addr_uses && !inv_can_prop_to_addr_use (def, use))
	def->can_prop_to_addr_uses = false;
    }
}
/* Finds the invariants USE depends on and stores them to the DEPENDS_ON
   bitmap.  Returns true if all dependencies of USE are known to be
   loop invariants, false otherwise.  */

static bool
check_dependency (basic_block bb, df_ref use, bitmap depends_on)
{
  df_ref def;
  basic_block def_bb;
  struct df_link *defs;
  struct def *def_data;
  struct invariant *inv;

  if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
    return false;

  defs = DF_REF_CHAIN (use);
  if (!defs)
    {
      unsigned int regno = DF_REF_REGNO (use);

      /* If this is the use of an uninitialized argument register that is
	 likely to be spilled, do not move it lest this might extend its
	 lifetime and cause reload to die.  This can occur for a call to
	 a function taking complex number arguments and moving the insns
	 preparing the arguments without moving the call itself wouldn't
	 gain much in practice.  */
      if ((DF_REF_FLAGS (use) & DF_HARD_REG_LIVE)
	  && FUNCTION_ARG_REGNO_P (regno)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno)))
	return false;

      return true;
    }

  if (defs->next)
    return false;

  def = defs->ref;
  check_invariant_table_size ();
  inv = invariant_table[DF_REF_ID (def)];
  if (!inv)
    return false;

  def_data = inv->def;
  gcc_assert (def_data != NULL);

  def_bb = DF_REF_BB (def);
  /* Note that in case bb == def_bb, we know that the definition
     dominates insn, because def has invariant_table[DF_REF_ID(def)]
     defined and we process the insns in the basic block bb
     sequentially.  */
  if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
    return false;

  bitmap_set_bit (depends_on, def_data->invno);
  return true;
}


/* Finds the invariants INSN depends on and stores them to the DEPENDS_ON
   bitmap.  Returns true if all dependencies of INSN are known to be
   loop invariants, false otherwise.  */

static bool
check_dependencies (rtx_insn *insn, bitmap depends_on)
{
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  basic_block bb = BLOCK_FOR_INSN (insn);

  FOR_EACH_INSN_INFO_USE (use, insn_info)
    if (!check_dependency (bb, use, depends_on))
      return false;
  FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
    if (!check_dependency (bb, use, depends_on))
      return false;

  return true;
}
/* Pre-check candidate DEST to skip the one which cannot make a valid insn
   during move_invariant_reg.  SIMPLE is false for hard registers, for which
   this check is skipped.  */
static bool
pre_check_invariant_p (bool simple, rtx dest)
{
  if (simple && REG_P (dest) && DF_REG_DEF_COUNT (REGNO (dest)) > 1)
    {
      df_ref use;
      unsigned int i = REGNO (dest);
      struct df_insn_info *insn_info;
      df_ref def_rec;

      for (use = DF_REG_USE_CHAIN (i); use; use = DF_REF_NEXT_REG (use))
	{
	  rtx_insn *ref = DF_REF_INSN (use);
	  insn_info = DF_INSN_INFO_GET (ref);

	  FOR_EACH_INSN_INFO_DEF (def_rec, insn_info)
	    if (DF_REF_REGNO (def_rec) == i)
	      {
		/* Multi definitions at this stage are most likely due to
		   instruction constraints, which require both a read and a
		   write of the same register.  Since move_invariant_reg is
		   not powerful enough to handle such cases, just ignore the
		   INV and leave the chance to others.  */
		return false;
	      }
	}
    }
  return true;
}
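
/* For illustration: if some insn in the loop both reads and writes DEST,
   e.g. a two-address operation

     (set (reg 100) (plus (reg 100) (reg 101)))

   then DEST has more than one definition and one of its use insns also
   defines it, so candidate sets of (reg 100) are rejected here instead of
   failing later in move_invariant_reg.  */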
/* Finds invariant in INSN.  ALWAYS_REACHED is true if the insn is always
   reached (i.e. executed on every iteration).  ALWAYS_EXECUTED is true if
   the insn is always executed, unless the program ends due to a function
   call.  */

static void
find_invariant_insn (rtx_insn *insn, bool always_reached, bool always_executed)
{
  df_ref ref;
  struct def *def;
  bitmap depends_on;
  rtx set, dest;
  bool simple = true;
  struct invariant *inv;

  /* We can't move a CC0 setter without the user.  */
  if (HAVE_cc0 && sets_cc0_p (insn))
    return;

  set = single_set (insn);
  if (!set)
    return;
  dest = SET_DEST (set);

  if (!REG_P (dest)
      || HARD_REGISTER_P (dest))
    simple = false;

  if (!may_assign_reg_p (dest)
      || !pre_check_invariant_p (simple, dest)
      || !check_maybe_invariant (SET_SRC (set)))
    return;

  /* If the insn can throw an exception, we cannot move it at all without
     changing cfg.  */
  if (can_throw_internal (insn))
    return;

  /* We cannot make a trapping insn executed, unless it was executed
     before.  */
  if (may_trap_or_fault_p (PATTERN (insn)) && !always_reached)
    return;

  depends_on = BITMAP_ALLOC (NULL);
  if (!check_dependencies (insn, depends_on))
    {
      BITMAP_FREE (depends_on);
      return;
    }

  if (simple)
    def = XCNEW (struct def);
  else
    def = NULL;

  inv = create_new_invariant (def, insn, depends_on, always_executed);

  if (simple)
    {
      ref = df_find_def (insn, dest);
      check_invariant_table_size ();
      invariant_table[DF_REF_ID (ref)] = inv;
    }
}
/* Record registers used in INSN that have a unique invariant definition.  */

static void
record_uses (rtx_insn *insn)
{
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  struct invariant *inv;

  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      inv = invariant_for_use (use);
      if (inv)
	record_use (inv->def, use);
    }
  FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
    {
      inv = invariant_for_use (use);
      if (inv)
	record_use (inv->def, use);
    }
}

/* Finds invariants in INSN.  ALWAYS_REACHED is true if the insn is always
   reached (i.e. executed on every iteration).  ALWAYS_EXECUTED is true if
   the insn is always executed, unless the program ends due to a function
   call.  */

static void
find_invariants_insn (rtx_insn *insn, bool always_reached, bool always_executed)
{
  find_invariant_insn (insn, always_reached, always_executed);
  record_uses (insn);
}

/* Finds invariants in basic block BB.  ALWAYS_REACHED is true if the
   basic block is always executed.  ALWAYS_EXECUTED is true if the basic
   block is always executed, unless the program ends due to a function
   call.  */

static void
find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
{
  rtx_insn *insn;

  FOR_BB_INSNS (bb, insn)
    {
      if (!NONDEBUG_INSN_P (insn))
	continue;

      find_invariants_insn (insn, always_reached, always_executed);

      if (always_reached
	  && CALL_P (insn)
	  && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
	      || ! RTL_CONST_OR_PURE_CALL_P (insn)))
	always_reached = false;
    }
}

/* Finds invariants in LOOP with body BODY.  ALWAYS_REACHED is the bitmap of
   basic blocks in BODY that are always executed.  ALWAYS_EXECUTED is the
   bitmap of basic blocks in BODY that are always executed unless the program
   ends due to a function call.  */

static void
find_invariants_body (struct loop *loop, basic_block *body,
		      bitmap always_reached, bitmap always_executed)
{
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    find_invariants_bb (body[i],
			bitmap_bit_p (always_reached, i),
			bitmap_bit_p (always_executed, i));
}
/* Finds invariants in LOOP.  */

static void
find_invariants (struct loop *loop)
{
  bitmap may_exit = BITMAP_ALLOC (NULL);
  bitmap always_reached = BITMAP_ALLOC (NULL);
  bitmap has_exit = BITMAP_ALLOC (NULL);
  bitmap always_executed = BITMAP_ALLOC (NULL);
  basic_block *body = get_loop_body_in_dom_order (loop);

  find_exits (loop, body, may_exit, has_exit);
  compute_always_reached (loop, body, may_exit, always_reached);
  compute_always_reached (loop, body, has_exit, always_executed);

  find_defs (loop);
  find_invariants_body (loop, body, always_reached, always_executed);
  merge_identical_invariants ();

  BITMAP_FREE (always_reached);
  BITMAP_FREE (always_executed);
  BITMAP_FREE (may_exit);
  BITMAP_FREE (has_exit);
  free (body);
}

/* Frees a list of uses USE.  */

static void
free_use_list (struct use *use)
{
  struct use *next;

  for (; use; use = next)
    {
      next = use->next;
      free (use);
    }
}
/* Return pressure class and number of hard registers (through *NREGS)
   for destination of INSN.  */
static enum reg_class
get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
{
  rtx reg;
  enum reg_class pressure_class;
  rtx set = single_set (insn);

  /* Considered invariant insns have only one set.  */
  gcc_assert (set != NULL_RTX);
  reg = SET_DEST (set);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (MEM_P (reg))
    {
      *nregs = 0;
      pressure_class = NO_REGS;
    }
  else
    {
      if (! REG_P (reg))
	reg = NULL_RTX;
      if (reg == NULL_RTX)
	pressure_class = GENERAL_REGS;
      else
	{
	  pressure_class = reg_allocno_class (REGNO (reg));
	  pressure_class = ira_pressure_class_translate[pressure_class];
	}
      *nregs
	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
    }
  return pressure_class;
}
/* Calculates cost and number of registers needed for moving invariant INV
   out of the loop and stores them to *COMP_COST and *REGS_NEEDED.  *CL will
   be the REG_CLASS of INV.  Return
   -1: if INV is invalid.
    0: if INV and its depends_on have the same reg_class.
    1: if INV and its depends_on have different reg_classes.  */

static int
get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed,
	      enum reg_class *cl)
{
  int i, acomp_cost;
  unsigned aregs_needed[N_REG_CLASSES];
  unsigned depno;
  struct invariant *dep;
  bitmap_iterator bi;
  int ret = 1;

  /* Find the representative of the class of the equivalent invariants.  */
  inv = invariants[inv->eqto];

  *comp_cost = 0;
  if (! flag_ira_loop_pressure)
    regs_needed[0] = 0;
  else
    {
      for (i = 0; i < ira_pressure_classes_num; i++)
	regs_needed[ira_pressure_classes[i]] = 0;
    }

  if (inv->move
      || inv->stamp == actual_stamp)
    return -1;
  inv->stamp = actual_stamp;

  if (! flag_ira_loop_pressure)
    regs_needed[0]++;
  else
    {
      int nregs;
      enum reg_class pressure_class;

      pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
      regs_needed[pressure_class] += nregs;
      *cl = pressure_class;
      ret = 0;
    }

  if (!inv->cheap_address
      || inv->def->n_uses == 0
      || inv->def->n_addr_uses < inv->def->n_uses
      /* Count cost if the inv can't be propagated into address uses.  */
      || !inv->def->can_prop_to_addr_uses)
    (*comp_cost) += inv->cost * inv->eqno;

#ifdef STACK_REGS
  {
    /* Hoisting constant pool constants into stack regs may cost more than
       just single register.  On x87, the balance is affected both by the
       small number of FP registers, and by its register stack organization,
       that forces us to add compensation code in and around the loop to
       shuffle the operands to the top of stack before use, and pop them
       from the stack after the loop finishes.

       To model this effect, we increase the number of registers needed for
       stack registers by two: one register push, and one register pop.
       This usually has the effect that FP constant loads from the constant
       pool are not moved out of the loop.

       Note that this also means that dependent invariants cannot be moved.
       However, the primary purpose of this pass is to move loop invariant
       address arithmetic out of loops, and address arithmetic that depends
       on floating point constants is unlikely to ever occur.  */
    rtx set = single_set (inv->insn);
    if (set
	&& IS_STACK_MODE (GET_MODE (SET_SRC (set)))
	&& constant_pool_constant_p (SET_SRC (set)))
      {
	if (flag_ira_loop_pressure)
	  regs_needed[ira_stack_reg_pressure_class] += 2;
	else
	  regs_needed[0] += 2;
      }
  }
#endif

  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
    {
      bool check_p;
      enum reg_class dep_cl = ALL_REGS;
      int dep_ret;

      dep = invariants[depno];

      /* If DEP is moved out of the loop, it is not a depends_on any more.  */
      if (dep->move)
	continue;

      dep_ret = get_inv_cost (dep, &acomp_cost, aregs_needed, &dep_cl);

      if (! flag_ira_loop_pressure)
	check_p = aregs_needed[0] != 0;
      else
	{
	  for (i = 0; i < ira_pressure_classes_num; i++)
	    if (aregs_needed[ira_pressure_classes[i]] != 0)
	      break;
	  check_p = i < ira_pressure_classes_num;

	  if ((dep_ret == 1) || ((dep_ret == 0) && (*cl != dep_cl)))
	    {
	      *cl = ALL_REGS;
	      ret = 1;
	    }
	}
      if (check_p
	  /* We need to check always_executed, since if the original value of
	     the invariant may be preserved, we may need to keep it in a
	     separate register.  TODO check whether the register has a
	     use outside of the loop.  */
	  && dep->always_executed
	  && !dep->def->uses->next)
	{
	  /* If this is a single use, after moving the dependency we will not
	     need a new register.  */
	  if (! flag_ira_loop_pressure)
	    aregs_needed[0]--;
	  else
	    {
	      int nregs;
	      enum reg_class pressure_class;

	      pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
	      aregs_needed[pressure_class] -= nregs;
	    }
	}

      if (! flag_ira_loop_pressure)
	regs_needed[0] += aregs_needed[0];
      else
	{
	  for (i = 0; i < ira_pressure_classes_num; i++)
	    regs_needed[ira_pressure_classes[i]]
	      += aregs_needed[ira_pressure_classes[i]];
	}
      (*comp_cost) += acomp_cost;
    }
  return ret;
}
/* Calculates gain for eliminating invariant INV.  REGS_USED is the number
   of registers used in the loop, NEW_REGS is the number of new variables
   already added due to the invariant motion.  The number of registers needed
   for it is stored in *REGS_NEEDED.  SPEED and CALL_P are flags passed
   through to estimate_reg_pressure_cost.  */

static int
gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
		    unsigned *new_regs, unsigned regs_used,
		    bool speed, bool call_p)
{
  int comp_cost, size_cost;
  /* Workaround -Wmaybe-uninitialized false positive during
     profiledbootstrap by initializing it.  */
  enum reg_class cl = NO_REGS;
  int ret;

  actual_stamp++;

  ret = get_inv_cost (inv, &comp_cost, regs_needed, &cl);

  if (! flag_ira_loop_pressure)
    {
      size_cost = (estimate_reg_pressure_cost (new_regs[0] + regs_needed[0],
					       regs_used, speed, call_p)
		   - estimate_reg_pressure_cost (new_regs[0],
						 regs_used, speed, call_p));
    }
  else if (ret < 0)
    return -1;
  else if ((ret == 0) && (cl == NO_REGS))
    /* Hoist it anyway since it does not impact register pressure.  */
    return 1;
  else
    {
      int i;
      enum reg_class pressure_class;

      for (i = 0; i < ira_pressure_classes_num; i++)
	{
	  pressure_class = ira_pressure_classes[i];

	  if (!reg_classes_intersect_p (pressure_class, cl))
	    continue;

	  if ((int) new_regs[pressure_class]
	      + (int) regs_needed[pressure_class]
	      + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
	      + IRA_LOOP_RESERVED_REGS
	      > ira_class_hard_regs_num[pressure_class])
	    break;
	}
      if (i < ira_pressure_classes_num)
	/* There will be register pressure excess and we do not want to
	   perform this loop invariant motion.  All loop invariants with
	   non-positive gains will be rejected in function
	   find_invariants_to_move.  Therefore we return a negative
	   number here.

	   One could think that this also rejects expensive loop
	   invariant motions and this will hurt code performance.
	   However numerous experiments with different heuristics
	   taking invariant cost into account did not confirm this
	   assumption.  There are possible explanations for this
	   result:
	   o probably all expensive invariants were already moved out
	     of the loop by PRE and the gimple invariant motion pass.
	   o expensive invariant execution will be hidden by insn
	     scheduling or OOO processor hardware because usually such
	     invariants have a lot of freedom to be executed
	     out-of-order.
	   Another reason for ignoring invariant cost vs. spill cost
	   heuristics is the difficulty of evaluating spill cost
	   accurately at this stage.  */
	return -1;
      else
	size_cost = 0;
    }
  return comp_cost - size_cost;
}
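
/* A worked example with hypothetical numbers: if the representative
   invariant has cost 4 and eqno 2, get_inv_cost yields comp_cost = 8; if
   allocating the needed registers raises the estimated pressure cost from
   2 to 5, then size_cost = 3 and the gain is 8 - 3 = 5, so the invariant
   is considered profitable to move.  */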
/* Finds invariant with best gain for moving.  Returns the gain, stores
   the invariant in *BEST and number of registers needed for it to
   *REGS_NEEDED.  REGS_USED is the number of registers used in the loop.
   NEW_REGS is the number of new variables already added due to invariant
   motion.  */

static int
best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
			 unsigned *new_regs, unsigned regs_used,
			 bool speed, bool call_p)
{
  struct invariant *inv;
  int i, gain = 0, again;
  unsigned aregs_needed[N_REG_CLASSES], invno;

  FOR_EACH_VEC_ELT (invariants, invno, inv)
    {
      if (inv->move)
	continue;

      /* Only consider the "representatives" of equivalent invariants.  */
      if (inv->eqto != inv->invno)
	continue;

      again = gain_for_invariant (inv, aregs_needed, new_regs, regs_used,
				  speed, call_p);
      if (again > gain)
	{
	  gain = again;
	  *best = inv;
	  if (! flag_ira_loop_pressure)
	    regs_needed[0] = aregs_needed[0];
	  else
	    {
	      for (i = 0; i < ira_pressure_classes_num; i++)
		regs_needed[ira_pressure_classes[i]]
		  = aregs_needed[ira_pressure_classes[i]];
	    }
	}
    }

  return gain;
}

/* Marks invariant INVNO and all its dependencies for moving.  */

static void
set_move_mark (unsigned invno, int gain)
{
  struct invariant *inv = invariants[invno];
  bitmap_iterator bi;

  /* Find the representative of the class of the equivalent invariants.  */
  inv = invariants[inv->eqto];

  if (inv->move)
    return;
  inv->move = true;

  if (dump_file)
    {
      if (gain >= 0)
	fprintf (dump_file, "Decided to move invariant %d -- gain %d\n",
		 invno, gain);
      else
	fprintf (dump_file, "Decided to move dependent invariant %d\n",
		 invno);
    }

  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, bi)
    {
      set_move_mark (invno, -1);
    }
}
/* Determines which invariants to move.  */

static void
find_invariants_to_move (bool speed, bool call_p)
{
  int gain;
  unsigned i, regs_used, regs_needed[N_REG_CLASSES], new_regs[N_REG_CLASSES];
  struct invariant *inv = NULL;

  if (!invariants.length ())
    return;

  if (flag_ira_loop_pressure)
    /* REGS_USED is actually never used when the flag is on.  */
    regs_used = 0;
  else
    /* We do not really do a good job in estimating number of
       registers used; we put some initial bound here to stand for
       induction variables etc.  that we do not detect.  */
    {
      unsigned int n_regs = DF_REG_SIZE (df);

      regs_used = 2;

      for (i = 0; i < n_regs; i++)
	{
	  if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
	    {
	      /* This is a value that is used but not changed inside loop.  */
	      regs_used++;
	    }
	}
    }

  if (! flag_ira_loop_pressure)
    new_regs[0] = regs_needed[0] = 0;
  else
    {
      for (i = 0; (int) i < ira_pressure_classes_num; i++)
	new_regs[ira_pressure_classes[i]] = 0;
    }
  while ((gain = best_gain_for_invariant (&inv, regs_needed,
					  new_regs, regs_used,
					  speed, call_p)) > 0)
    {
      set_move_mark (inv->invno, gain);
      if (! flag_ira_loop_pressure)
	new_regs[0] += regs_needed[0];
      else
	{
	  for (i = 0; (int) i < ira_pressure_classes_num; i++)
	    new_regs[ira_pressure_classes[i]]
	      += regs_needed[ira_pressure_classes[i]];
	}
    }
}
/* Replace the uses, reached by the definition of invariant INV, by REG.

   IN_GROUP is nonzero if this is part of a group of changes that must be
   performed as a group.  In that case, the changes will be stored.  The
   function `apply_change_group' will validate and apply the changes.  */

static int
replace_uses (struct invariant *inv, rtx reg, bool in_group)
{
  /* Replace the uses we know to be dominated.  It saves work for copy
     propagation, and also it is necessary so that dependent invariants
     are computed right.  */
  if (inv->def)
    {
      struct use *use;
      for (use = inv->def->uses; use; use = use->next)
	validate_change (use->insn, use->pos, reg, true);

      /* If we aren't part of a larger group, apply the changes now.  */
      if (!in_group)
	return apply_change_group ();
    }

  return 1;
}
/* Whether invariant INV setting REG can be moved out of LOOP, at the end of
   the block preceding its header.  */

static bool
can_move_invariant_reg (struct loop *loop, struct invariant *inv, rtx reg)
{
  df_ref def, use;
  unsigned int dest_regno, defs_in_loop_count = 0;
  rtx_insn *insn = inv->insn;
  basic_block bb = BLOCK_FOR_INSN (inv->insn);

  /* We ignore hard register and memory access for cost and complexity reasons.
     Hard registers are few at this stage and expensive to consider as they
     require building a separate data flow.  Memory access would require using
     df_simulate_* and can_move_insns_across functions and is more complex.  */
  if (!REG_P (reg) || HARD_REGISTER_P (reg))
    return false;

  /* Check whether the set is always executed.  We could omit this condition if
     we know that the register is unused outside of the loop, but it does not
     seem worth finding out.  */
  if (!inv->always_executed)
    return false;

  /* Check that all uses that would be dominated by def are already dominated
     by it.  */
  dest_regno = REGNO (reg);
  for (use = DF_REG_USE_CHAIN (dest_regno); use; use = DF_REF_NEXT_REG (use))
    {
      rtx_insn *use_insn;
      basic_block use_bb;

      use_insn = DF_REF_INSN (use);
      use_bb = BLOCK_FOR_INSN (use_insn);

      /* Ignore instruction considered for moving.  */
      if (use_insn == insn)
	continue;

      /* Don't consider uses outside loop.  */
      if (!flow_bb_inside_loop_p (loop, use_bb))
	continue;

      /* Don't move if a use is not dominated by def in insn.  */
      if (use_bb == bb && DF_INSN_LUID (insn) >= DF_INSN_LUID (use_insn))
	return false;
      if (!dominated_by_p (CDI_DOMINATORS, use_bb, bb))
	return false;
    }

  /* Check for other defs.  Any other def in the loop might reach a use
     currently reached by the def in insn.  */
  for (def = DF_REG_DEF_CHAIN (dest_regno); def; def = DF_REF_NEXT_REG (def))
    {
      basic_block def_bb = DF_REF_BB (def);

      /* Defs in exit block cannot reach a use they weren't already
	 reaching.  */
      if (single_succ_p (def_bb))
	{
	  basic_block def_bb_succ;

	  def_bb_succ = single_succ (def_bb);
	  if (!flow_bb_inside_loop_p (loop, def_bb_succ))
	    continue;
	}

      if (++defs_in_loop_count > 1)
	return false;
    }

  return true;
}
/* Move invariant INVNO out of the LOOP.  Returns true if this succeeds, false
   otherwise.  */

static bool
move_invariant_reg (struct loop *loop, unsigned invno)
{
  struct invariant *inv = invariants[invno];
  struct invariant *repr = invariants[inv->eqto];
  unsigned i;
  basic_block preheader = loop_preheader_edge (loop)->src;
  rtx reg, set, dest, note;
  bitmap_iterator bi;
  int regno = -1;

  if (inv->reg)
    return true;
  if (!repr->move)
    return false;

  /* If this is a representative of the class of equivalent invariants,
     really move the invariant.  Otherwise just replace its use with
     the register used for the representative.  */
  if (inv == repr)
    {
      if (inv->depends_on)
	{
	  EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, bi)
	    {
	      if (!move_invariant_reg (loop, i))
		goto fail;
	    }
	}

      /* If possible, just move the set out of the loop.  Otherwise, we
	 need to create a temporary register.  */
      set = single_set (inv->insn);
      reg = dest = SET_DEST (set);
      if (GET_CODE (reg) == SUBREG)
	reg = SUBREG_REG (reg);
      if (REG_P (reg))
	regno = REGNO (reg);

      if (!can_move_invariant_reg (loop, inv, dest))
	{
	  reg = gen_reg_rtx_and_attrs (dest);

	  /* Try replacing the destination by a new pseudoregister.  */
	  validate_change (inv->insn, &SET_DEST (set), reg, true);

	  /* As well as all the dominated uses.  */
	  replace_uses (inv, reg, true);

	  /* And validate all the changes.  */
	  if (!apply_change_group ())
	    goto fail;

	  emit_insn_after (gen_move_insn (dest, reg), inv->insn);
	}
      else if (dump_file)
	fprintf (dump_file, "Invariant %d moved without introducing a new "
		 "temporary register\n", invno);
      reorder_insns (inv->insn, inv->insn, BB_END (preheader));
      df_recompute_luids (preheader);

      /* If there is a REG_EQUAL note on the insn we just moved, and the
	 insn is in a basic block that is not always executed or the note
	 contains something for which we don't know the invariant status,
	 the note may no longer be valid after we move the insn.  Note that
	 uses in REG_EQUAL notes are taken into account in the computation
	 of invariants, so it is safe to retain the note even if it contains
	 register references for which we know the invariant status.  */
      if ((note = find_reg_note (inv->insn, REG_EQUAL, NULL_RTX))
	  && (!inv->always_executed
	      || !check_maybe_invariant (XEXP (note, 0))))
	remove_note (inv->insn, note);
    }
  else
    {
      if (!move_invariant_reg (loop, repr->invno))
	goto fail;
      reg = repr->reg;
      regno = repr->orig_regno;
      if (!replace_uses (inv, reg, false))
	goto fail;
      set = single_set (inv->insn);
      emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
      delete_insn (inv->insn);
    }

  inv->reg = reg;
  inv->orig_regno = regno;

  return true;

fail:
  /* If we failed, clear move flag, so that we do not try to move inv
     again.  */
  if (dump_file)
    fprintf (dump_file, "Failed to move invariant %d\n", invno);
  inv->move = false;
  inv->reg = NULL_RTX;
  inv->orig_regno = -1;

  return false;
}
/* Move the selected invariants out of the LOOP.  */

static void
move_invariants (struct loop *loop)
{
  struct invariant *inv;
  unsigned i;

  FOR_EACH_VEC_ELT (invariants, i, inv)
    move_invariant_reg (loop, i);
  if (flag_ira_loop_pressure && resize_reg_info ())
    {
      FOR_EACH_VEC_ELT (invariants, i, inv)
	if (inv->reg != NULL_RTX)
	  {
	    if (inv->orig_regno >= 0)
	      setup_reg_classes (REGNO (inv->reg),
				 reg_preferred_class (inv->orig_regno),
				 reg_alternate_class (inv->orig_regno),
				 reg_allocno_class (inv->orig_regno));
	    else
	      setup_reg_classes (REGNO (inv->reg),
				 GENERAL_REGS, NO_REGS, GENERAL_REGS);
	  }
    }
}
/* Initializes invariant motion data.  */

static void
init_inv_motion_data (void)
{
  actual_stamp = 1;

  invariants.create (100);
}

/* Frees the data allocated by invariant motion.  */

static void
free_inv_motion_data (void)
{
  unsigned i;
  struct def *def;
  struct invariant *inv;

  check_invariant_table_size ();
  for (i = 0; i < DF_DEFS_TABLE_SIZE (); i++)
    {
      inv = invariant_table[i];
      if (inv)
	{
	  def = inv->def;
	  gcc_assert (def != NULL);

	  free_use_list (def->uses);
	  free (def);
	  invariant_table[i] = NULL;
	}
    }

  FOR_EACH_VEC_ELT (invariants, i, inv)
    {
      BITMAP_FREE (inv->depends_on);
      free (inv);
    }
  invariants.release ();
}

/* Move the invariants out of the LOOP.  */

static void
move_single_loop_invariants (struct loop *loop)
{
  init_inv_motion_data ();

  find_invariants (loop);
  find_invariants_to_move (optimize_loop_for_speed_p (loop),
			   LOOP_DATA (loop)->has_call);
  move_invariants (loop);

  free_inv_motion_data ();
}
/* Releases the auxiliary data for LOOP.  */

static void
free_loop_data (struct loop *loop)
{
  struct loop_data *data = LOOP_DATA (loop);
  if (!data)
    return;

  bitmap_clear (&LOOP_DATA (loop)->regs_ref);
  bitmap_clear (&LOOP_DATA (loop)->regs_live);
  free (data);
  loop->aux = NULL;
}



/* Registers currently living.  */
static bitmap_head curr_regs_live;

/* Current reg pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Record all regs that are set in any one insn.  Communication from
   mark_reg_{store,clobber} and global_conflicts.  Asm can refer to
   all hard-registers.  */
static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
		     ? FIRST_PSEUDO_REGISTER : MAX_RECOG_OPERANDS) * 2];
/* Number of regs stored in the previous array.  */
static int n_regs_set;
/* Return pressure class and number of needed hard registers (through
   *NREGS) of register REGNO.  */
static enum reg_class
get_regno_pressure_class (int regno, int *nregs)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      enum reg_class pressure_class;

      pressure_class = reg_allocno_class (regno);
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
      return pressure_class;
    }
  else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
    {
      *nregs = 1;
      return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
    }
  else
    {
      *nregs = 0;
      return NO_REGS;
    }
}

/* Increase (if INCR_P) or decrease current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
      if (LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
	  < curr_reg_pressure[pressure_class])
	LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
	  = curr_reg_pressure[pressure_class];
    }
}
/* Mark REGNO birth.  */
static void
mark_regno_live (int regno)
{
  struct loop *loop;

  for (loop = curr_loop;
       loop != current_loops->tree_root;
       loop = loop_outer (loop))
    bitmap_set_bit (&LOOP_DATA (loop)->regs_live, regno);
  if (!bitmap_set_bit (&curr_regs_live, regno))
    return;
  change_pressure (regno, true);
}

/* Mark REGNO death.  */
static void
mark_regno_death (int regno)
{
  if (! bitmap_clear_bit (&curr_regs_live, regno))
    return;
  change_pressure (regno, false);
}

/* Mark setting register REG.  */
static void
mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
		void *data ATTRIBUTE_UNUSED)
{
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regs_set[n_regs_set++] = reg;

  unsigned int end_regno = END_REGNO (reg);
  for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
    mark_regno_live (regno);
}

/* Mark clobbering register REG.  */
static void
mark_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_reg_store (reg, setter, data);
}

/* Mark register REG death.  */
static void
mark_reg_death (rtx reg)
{
  unsigned int end_regno = END_REGNO (reg);
  for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
    mark_regno_death (regno);
}
/* Mark occurrence of registers in X for the current loop.  */
static void
mark_ref_regs (rtx x)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return;

  code = GET_CODE (x);
  if (code == REG)
    {
      struct loop *loop;

      for (loop = curr_loop;
	   loop != current_loops->tree_root;
	   loop = loop_outer (loop))
	bitmap_set_bit (&LOOP_DATA (loop)->regs_ref, REGNO (x));
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      mark_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	int j;

	for (j = 0; j < XVECLEN (x, i); j++)
	  mark_ref_regs (XVECEXP (x, i, j));
      }
}
/* Calculate register pressure in the loops.  */
static void
calculate_loop_reg_pressure (void)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;
  basic_block bb;
  rtx_insn *insn;
  rtx link;
  struct loop *loop, *parent;

  FOR_EACH_LOOP (loop, 0)
    if (loop->aux == NULL)
      {
	loop->aux = xcalloc (1, sizeof (struct loop_data));
	bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
	bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
      }
  ira_setup_eliminable_regset ();
  bitmap_initialize (&curr_regs_live, &reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_loop = bb->loop_father;
      if (curr_loop == current_loops->tree_root)
	continue;

      for (loop = curr_loop;
	   loop != current_loops->tree_root;
	   loop = loop_outer (loop))
	bitmap_ior_into (&LOOP_DATA (loop)->regs_live, DF_LR_IN (bb));

      bitmap_copy (&curr_regs_live, DF_LR_IN (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
	curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live, 0, j, bi)
	change_pressure (j, true);
      FOR_BB_INSNS (bb, insn)
	{
	  if (! NONDEBUG_INSN_P (insn))
	    continue;

	  mark_ref_regs (PATTERN (insn));
	  n_regs_set = 0;
	  note_stores (PATTERN (insn), mark_reg_clobber, NULL);

	  /* Mark any registers dead after INSN as dead now.  */

	  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	    if (REG_NOTE_KIND (link) == REG_DEAD)
	      mark_reg_death (XEXP (link, 0));

	  /* Mark any registers set in INSN as live,
	     and mark them as conflicting with all other live regs.
	     Clobbers are processed again, so they conflict with
	     the registers that are set.  */

	  note_stores (PATTERN (insn), mark_reg_store, NULL);

	  if (AUTO_INC_DEC)
	    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	      if (REG_NOTE_KIND (link) == REG_INC)
		mark_reg_store (XEXP (link, 0), NULL_RTX, NULL);

	  while (n_regs_set-- > 0)
	    {
	      rtx note = find_regno_note (insn, REG_UNUSED,
					  REGNO (regs_set[n_regs_set]));
	      if (! note)
		continue;

	      mark_reg_death (XEXP (note, 0));
	    }
	}
    }
  bitmap_clear (&curr_regs_live);
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    FOR_EACH_LOOP (loop, 0)
      {
	EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
	  if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
	    {
	      enum reg_class pressure_class;
	      int nregs;

	      pressure_class = get_regno_pressure_class (j, &nregs);
	      LOOP_DATA (loop)->max_reg_pressure[pressure_class] -= nregs;
	    }
      }
  if (dump_file == NULL)
    return;
  FOR_EACH_LOOP (loop, 0)
    {
      parent = loop_outer (loop);
      fprintf (dump_file, "\n  Loop %d (parent %d, header bb%d, depth %d)\n",
	       loop->num, (parent == NULL ? -1 : parent->num),
	       loop->header->index, loop_depth (loop));
      fprintf (dump_file, "\n    ref. regnos:");
      EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_ref, 0, j, bi)
	fprintf (dump_file, " %d", j);
      fprintf (dump_file, "\n    live regnos:");
      EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
	fprintf (dump_file, " %d", j);
      fprintf (dump_file, "\n    Pressure:");
      for (i = 0; (int) i < ira_pressure_classes_num; i++)
	{
	  enum reg_class pressure_class;

	  pressure_class = ira_pressure_classes[i];
	  if (LOOP_DATA (loop)->max_reg_pressure[pressure_class] == 0)
	    continue;
	  fprintf (dump_file, " %s=%d", reg_class_names[pressure_class],
		   LOOP_DATA (loop)->max_reg_pressure[pressure_class]);
	}
      fprintf (dump_file, "\n");
    }
}
/* Move the invariants out of the loops.  */

void
move_loop_invariants (void)
{
  struct loop *loop;

  if (flag_ira_loop_pressure)
    {
      df_analyze ();
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (true, dump_file);
      calculate_loop_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }
  df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
  /* Process the loops, innermost first.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      curr_loop = loop;
      /* move_single_loop_invariants for very large loops is time
	 consuming and might need a lot of memory.  */
      if (loop->num_nodes <= (unsigned) LOOP_INVARIANT_MAX_BBS_IN_LOOP)
	move_single_loop_invariants (loop);
    }

  FOR_EACH_LOOP (loop, 0)
    {
      free_loop_data (loop);
    }

  if (flag_ira_loop_pressure)
    /* There is no point in keeping this info, as it was most
       probably invalidated by subsequent passes.  */
    free_reg_info ();
  free (invariant_table);
  invariant_table = NULL;
  invariant_table_size = 0;

  checking_verify_flow_info ();
}