1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23 #include "coretypes.h"
27 #include "insn-config.h"
30 #include "integrate.h"
34 #include "hard-reg-set.h"
35 #include "basic-block.h"
43 /* This is the toplevel file of a graph coloring register allocator.
44 It is able to act like a George & Appel allocator, i.e. with iterative
45 coalescing plus spill coalescing/propagation.
46 And it can act as a traditional Briggs allocator, although with
47 optimistic coalescing. Additionally it has a custom pass, which
48 tries to reduce the overall cost of the colored graph.
50 We support two modes of spilling: spill-everywhere, which is extremely
51 fast, and interference region spilling, which reduces spill code to a
52 large extent, but is slower.
56 Briggs, P., Cooper, K. D., and Torczon, L. 1994. Improvements to graph
57 coloring register allocation. ACM Trans. Program. Lang. Syst. 16, 3 (May),
60 Bergner, P., Dahl, P., Engebretsen, D., and O'Keefe, M. 1997. Spill code
61 minimization via interference region spilling. In Proc. ACM SIGPLAN '97
62 Conf. on Prog. Language Design and Implementation. ACM, 287-295.
64 George, L., Appel, A.W. 1996. Iterated register coalescing.
65 ACM Trans. Program. Lang. Syst. 18, 3 (May), 300-324.
69 /* This file contains the main entry point (reg_alloc), some helper routines
70 used by more than one file of the register allocator, and the toplevel
71 driver procedure (one_pass). */
73 /* Things one might do at some point:
75 * Lattice based rematerialization
76 * create definitions of ever-life regs at the beginning of
78 * insert loads as soon, stores as late as possible
79 * insert spill insns as outward as possible (either looptree, or LCM)
81 * delete coalesced insns. Partly done. The rest can only go, when we get
83 * don't destroy coalescing information completely when spilling
84 * use the constraints from asms
87 static struct obstack ra_obstack
;
88 static void create_insn_info
PARAMS ((struct df
*));
89 static void free_insn_info
PARAMS ((void));
90 static void alloc_mem
PARAMS ((struct df
*));
91 static void free_mem
PARAMS ((struct df
*));
92 static void free_all_mem
PARAMS ((struct df
*df
));
93 static int one_pass
PARAMS ((struct df
*, int));
94 static void check_df
PARAMS ((struct df
*));
95 static void init_ra
PARAMS ((void));
97 void reg_alloc
PARAMS ((void));
99 /* These global variables are "internal" to the register allocator.
100 They are all documented at their declarations in ra.h. */
102 /* Somewhen we want to get rid of one of those sbitmaps.
103 (for now I need the sup_igraph to note if there is any conflict between
104 parts of webs at all. I can't use igraph for this, as there only the real
105 conflicts are noted.) This is only used to prevent coalescing two
106 conflicting webs, were only parts of them are in conflict. */
110 /* Note the insns not inserted by the allocator, where we detected any
111 deaths of pseudos. It is used to detect closeness of defs and uses.
112 In the first pass this is empty (we could initialize it from REG_DEAD
113 notes), in the other passes it is left from the pass before. */
114 sbitmap insns_with_deaths
;
115 int death_insns_max_uid
;
117 struct web_part
*web_parts
;
119 unsigned int num_webs
;
120 unsigned int num_subwebs
;
121 unsigned int num_allwebs
;
123 struct web
*hardreg2web
[FIRST_PSEUDO_REGISTER
];
124 struct web
**def2web
;
125 struct web
**use2web
;
126 struct move_list
*wl_moves
;
128 short *ra_reg_renumber
;
132 unsigned int max_normal_pseudo
;
133 int an_unusable_color
;
135 /* The different lists on which a web can be (based on the type). */
136 struct dlist
*web_lists
[(int) LAST_NODE_TYPE
];
138 unsigned int last_def_id
;
139 unsigned int last_use_id
;
140 unsigned int last_num_webs
;
142 sbitmap last_check_uses
;
143 unsigned int remember_conflicts
;
147 HARD_REG_SET never_use_colors
;
148 HARD_REG_SET usable_regs
[N_REG_CLASSES
];
149 unsigned int num_free_regs
[N_REG_CLASSES
];
150 HARD_REG_SET hardregs_for_mode
[NUM_MACHINE_MODES
];
151 HARD_REG_SET invalid_mode_change_regs
;
152 unsigned char byte2bitcount
[256];
154 unsigned int debug_new_regalloc
= -1;
155 int flag_ra_biased
= 0;
156 int flag_ra_improved_spilling
= 0;
157 int flag_ra_ir_spilling
= 0;
158 int flag_ra_optimistic_coalescing
= 0;
159 int flag_ra_break_aliases
= 0;
160 int flag_ra_merge_spill_costs
= 0;
161 int flag_ra_spill_every_use
= 0;
162 int flag_ra_dump_notes
= 0;
/* ra_alloc / ra_calloc: obstack-based allocation helpers; objects
   allocated here live on ra_obstack until the allocator is done.
   NOTE(review): truncated extraction -- both function headers are
   missing, and ra_calloc's clearing/return of p is not visible.  */
164 /* Fast allocation of small objects, which live until the allocator
165 is done. Allocate an object of SIZE bytes. */
171 return obstack_alloc (&ra_obstack
, size
);
174 /* Like ra_alloc(), but clear the returned memory. */
180 void *p
= obstack_alloc (&ra_obstack
, size
);
/* hard_regs_count: count the hard registers in HARD_REG_SET RS, one
   byte at a time via the byte2bitcount[] table filled in init_ra().
   The first fragment handles a single-word HARD_REG_SET; the loop
   handles the multi-word representation (HARD_REG_SET_LONGS words).
   NOTE(review): truncated extraction -- the function header, the
   declarations of count/ofs, the shifting of rs/elt between bytes
   and the final return of count are not visible here.  */
185 /* Returns the number of hardregs in HARD_REG_SET RS. */
195 unsigned char byte
= rs
& 0xFF;
197 /* Avoid memory access, if nothing is set. */
199 count
+= byte2bitcount
[byte
];
203 for (ofs
= 0; ofs
< HARD_REG_SET_LONGS
; ofs
++)
205 HARD_REG_ELT_TYPE elt
= rs
[ofs
];
208 unsigned char byte
= elt
& 0xFF;
211 count
+= byte2bitcount
[byte
];
/* ra_emit_move_insn: emit a move X <- Y.  MODE_CC moves bypass
   emit_move_insn's operand validation (so their operands must already
   be basically valid); everything else goes through emit_move_insn.
   NOTE(review): truncated extraction -- the return type and the
   old-style parameter declarations of x/y are not visible here.  */
218 /* Basically like emit_move_insn (i.e. validifies constants and such),
219 but also handle MODE_CC moves (but then the operands must already
220 be basically valid. */
223 ra_emit_move_insn (x
, y
)
226 enum machine_mode mode
= GET_MODE (x
);
227 if (GET_MODE_CLASS (mode
) == MODE_CC
)
228 return emit_insn (gen_move_insn (x
, y
));
230 return emit_move_insn (x
, y
);
/* Per-insn summary (indexed by INSN_UID) of the valid defs and uses,
   built by create_insn_info() and released by free_insn_info().  */
struct ra_insn_info *insn_df;

/* Single backing array from which the insn_df[].defs/.uses pointer
   vectors are carved.  */
static struct ref **refs_for_insn_df;
/* create_insn_info: build insn_df[], a per-insn (indexed by INSN_UID)
   summary of the valid defs and uses from DF's chains, for fast access
   later.  Both arrays are xcalloc'd up front; act_refs carves each
   insn's .defs/.uses vectors out of the single refs_for_insn_df block.
   A ref counts as valid when its regno is a pseudo, or a hard reg not
   in never_use_colors.  Insns are walked backwards to mimic the visit
   order of rewrite_program2() and live_in().  The trailing check
   verifies act_refs never ran past the allocated block.
   NOTE(review): truncated extraction -- the function header, several
   braces/conditions and the overflow check's abort are not visible
   here; do not edit without consulting the original file.  */
237 /* Create the insn_df structure for each insn to have fast access to
238 all valid defs and uses in an insn. */
241 create_insn_info (df
)
245 struct ref
**act_refs
;
246 insn_df_max_uid
= get_max_uid ();
247 insn_df
= xcalloc (insn_df_max_uid
, sizeof (insn_df
[0]));
248 refs_for_insn_df
= xcalloc (df
->def_id
+ df
->use_id
, sizeof (struct ref
*));
249 act_refs
= refs_for_insn_df
;
250 /* We create those things backwards to mimic the order in which
251 the insns are visited in rewrite_program2() and live_in(). */
252 for (insn
= get_last_insn (); insn
; insn
= PREV_INSN (insn
))
254 int uid
= INSN_UID (insn
);
256 struct df_link
*link
;
259 for (n
= 0, link
= DF_INSN_DEFS (df
, insn
); link
; link
= link
->next
)
261 && (DF_REF_REGNO (link
->ref
) >= FIRST_PSEUDO_REGISTER
262 || !TEST_HARD_REG_BIT (never_use_colors
,
263 DF_REF_REGNO (link
->ref
))))
266 insn_df
[uid
].defs
= act_refs
;
267 insn_df
[uid
].defs
[n
++] = link
->ref
;
270 insn_df
[uid
].num_defs
= n
;
271 for (n
= 0, link
= DF_INSN_USES (df
, insn
); link
; link
= link
->next
)
273 && (DF_REF_REGNO (link
->ref
) >= FIRST_PSEUDO_REGISTER
274 || !TEST_HARD_REG_BIT (never_use_colors
,
275 DF_REF_REGNO (link
->ref
))))
278 insn_df
[uid
].uses
= act_refs
;
279 insn_df
[uid
].uses
[n
++] = link
->ref
;
282 insn_df
[uid
].num_uses
= n
;
284 if (refs_for_insn_df
+ (df
->def_id
+ df
->use_id
) < act_refs
)
/* free_insn_info: release the memory built by create_insn_info().
   NOTE(review): truncated extraction -- only the release of
   refs_for_insn_df is visible; the function header and presumably the
   freeing of insn_df itself are missing here -- confirm against the
   original file.  */
288 /* Free the insn_df structures. */
293 free (refs_for_insn_df
);
294 refs_for_insn_df
= NULL
;
/* find_subweb: walk WEB's subreg_next chain looking for the subweb
   whose original SUBREG matches REG in both mode and SUBREG_BYTE.
   NOTE(review): truncated extraction -- the function header, the
   consequence of the non-SUBREG early check and the return statements
   are not visible here.  */
300 /* Search WEB for a subweb, which represents REG. REG needs to
301 be a SUBREG, and the inner reg of it needs to be the one which is
302 represented by WEB. Returns the matching subweb or NULL. */
305 find_subweb (web
, reg
)
310 if (GET_CODE (reg
) != SUBREG
)
312 for (w
= web
->subreg_next
; w
; w
= w
->subreg_next
)
313 if (GET_MODE (w
->orig_x
) == GET_MODE (reg
)
314 && SUBREG_BYTE (w
->orig_x
) == SUBREG_BYTE (reg
))
/* find_subweb_2: like find_subweb(), but the wanted piece is given as
   SIZE_WORD, an encoded (size, byte offset) pair.  When SIZE_WORD
   equals the whole web's mode size the offset is implicitly zero (see
   the comment below); otherwise the subreg chain is searched,
   comparing each subweb's rtx_to_bits() encoding.
   NOTE(review): truncated extraction -- the function header, the
   comparison involving bl and the return statements are not visible
   here.  */
319 /* Similar to find_subweb(), but matches according to SIZE_WORD,
320 a collection of the needed size and offset (in bytes). */
323 find_subweb_2 (web
, size_word
)
325 unsigned int size_word
;
328 if (size_word
== GET_MODE_SIZE (GET_MODE (web
->orig_x
)))
329 /* size_word == size means BYTE_BEGIN(size_word) == 0. */
331 for (w
= web
->subreg_next
; w
; w
= w
->subreg_next
)
333 unsigned int bl
= rtx_to_bits (w
->orig_x
);
/* find_web_for_subweb_1: climb the parent_web links from SUBWEB up to
   the topmost (super)web.
   NOTE(review): truncated extraction -- the function header and the
   final return of subweb are not visible here.  */
340 /* Returns the superweb for SUBWEB. */
343 find_web_for_subweb_1 (subweb
)
346 while (subweb
->parent_web
)
347 subweb
= subweb
->parent_web
;
/* hard_regs_intersect_p: nonzero iff HARD_REG_SETs *A and *B share at
   least one register.  Computes C = *A & *B; GO_IF_HARD_REG_SUBSET
   jumps to the "lose" label when C is a subset of the empty class
   NO_REGS, i.e. when the sets are disjoint.
   NOTE(review): truncated extraction -- the function header, the
   declaration of c, the "lose" label and the return statements are
   not visible here.  */
351 /* Determine if two hard register sets intersect.
352 Return 1 if they do. */
355 hard_regs_intersect_p (a
, b
)
359 COPY_HARD_REG_SET (c
, *a
);
360 AND_HARD_REG_SET (c
, *b
);
361 GO_IF_HARD_REG_SUBSET (c
, reg_class_contents
[(int) NO_REGS
], lose
);
/* alloc_mem: allocate (or, on later passes, re-allocate) the per-pass
   memory: the builder's structures via ra_build_realloc(), the
   live_at_end[] bitmap array (last_basic_block + 2 entries -- the
   extra two presumably cover the entry/exit blocks, confirm against
   ra.h), and the per-insn def/use info via create_insn_info().
   NOTE(review): truncated extraction -- the function header and the
   tail of the xmalloc size expression are not visible here.  */
367 /* Allocate and initialize the memory necessary for one pass of the
368 register allocator. */
375 ra_build_realloc (df
);
378 live_at_end
= (bitmap
*) xmalloc ((last_basic_block
+ 2)
380 for (i
= 0; i
< last_basic_block
+ 2; i
++)
381 live_at_end
[i
] = BITMAP_XMALLOC ();
384 create_insn_info (df
);
/* free_mem: free what is not needed for the next pass; the DF
   parameter is deliberately unused (ATTRIBUTE_UNUSED).
   NOTE(review): truncated extraction -- the function header and body
   (presumably a free_insn_info() call) are not visible here.  */
387 /* Free the memory which isn't necessary for the next pass. */
391 struct df
*df ATTRIBUTE_UNUSED
;
/* free_all_mem: tear down everything the allocator built -- the
   live_at_end[] bitmaps, the colorizer's data (ra_colorize_free_all),
   the builder's data (ra_build_free_all), and the whole ra_obstack in
   one obstack_free(..., NULL) call.
   NOTE(review): truncated extraction -- the function header, the
   continuation of the leading comment and the free of the
   live_at_end array itself are not visible here.  */
397 /* Free all memory allocated for the register allocator. Used, when
406 for (i
= 0; i
< (unsigned)last_basic_block
+ 2; i
++)
407 BITMAP_XFREE (live_at_end
[i
]);
410 ra_colorize_free_all ();
411 ra_build_free_all (df
);
412 obstack_free (&ra_obstack
, NULL
);
/* clock() ticks accumulated by one_pass(): ticks_build for initial
   build passes, ticks_rebuild for incremental rebuild passes.  */
static long ticks_build;
static long ticks_rebuild;
/* one_pass: run one build/colorize/(spill) cycle over DF.  REBUILD is
   nonzero on all but the first pass and selects an incremental rebuild
   of the interference graph.  Returns nonzero when spill code was
   added, i.e. the caller must iterate again.  The elapsed clock()
   ticks are accounted into ticks_rebuild or ticks_build depending on
   which phase ran.
   NOTE(review): truncated extraction -- the function header, the
   build/rebuild calls, the actual_spill() invocation and several
   braces/conditions are not visible here.  */
418 /* Perform one pass of allocation. Returns nonzero, if some spill code
419 was added, i.e. if the allocator needs to rerun. */
422 one_pass (df
, rebuild
)
426 long ticks
= clock ();
427 int something_spilled
;
428 remember_conflicts
= 0;
430 /* Build the complete interference graph, or if this is not the first
431 pass, rebuild it incrementally. */
434 /* From now on, if we create new conflicts, we need to remember the
435 initial list of conflicts per web. */
436 remember_conflicts
= 1;
438 dump_igraph_machine ();
440 /* Colorize the I-graph. This results in either a list of
441 spilled_webs, in which case we need to run the spill phase, and
442 rerun the allocator, or that list is empty, meaning we are done. */
443 ra_colorize_graph (df
);
445 last_max_uid
= get_max_uid ();
446 /* actual_spill() might change WEBS(SPILLED) and even empty it,
447 so we need to remember its state. */
448 something_spilled
= !!WEBS(SPILLED
);
450 /* Add spill code if necessary. */
451 if (something_spilled
)
454 ticks
= clock () - ticks
;
456 ticks_rebuild
+= ticks
;
458 ticks_build
+= ticks
;
459 return something_spilled
;
/* init_ra: one-time initialization of the allocator's global tables:
   never_use_colors (fixed regs, regs that cannot be eliminated, and
   the frame/hard-frame/stack/argument pointers), the byte2bitcount[]
   popcount table, usable_regs[]/num_free_regs[] per register class,
   hardregs_for_mode[] per machine mode (marking ALL hardregs of a
   multi-reg, not just the first), invalid_mode_change_regs,
   an_unusable_color (first color in never_use_colors), and finally
   the insns_with_deaths sbitmap (initially all ones) plus the
   ra_obstack itself.
   NOTE(review): truncated extraction -- the function header, local
   declarations, loop braces and several statements (including the
   loop increments around an_unusable_color) are not visible here;
   consult the original file before editing.  */
462 /* Initialize various arrays for the register allocator. */
469 #ifdef ELIMINABLE_REGS
470 static const struct {const int from
, to
; } eliminables
[] = ELIMINABLE_REGS
;
474 = (! flag_omit_frame_pointer
475 #ifdef EXIT_IGNORE_STACK
476 || (current_function_calls_alloca
&& EXIT_IGNORE_STACK
)
478 || FRAME_POINTER_REQUIRED
);
482 /* We can't ever use any of the fixed regs. */
483 COPY_HARD_REG_SET (never_use_colors
, fixed_reg_set
);
485 /* Additionally don't even try to use hardregs, which we already
486 know are not eliminable. This includes also either the
487 hard framepointer or all regs which are eliminable into the
488 stack pointer, if need_fp is set. */
489 #ifdef ELIMINABLE_REGS
490 for (j
= 0; j
< ARRAY_SIZE (eliminables
); j
++)
492 if (! CAN_ELIMINATE (eliminables
[j
].from
, eliminables
[j
].to
)
493 || (eliminables
[j
].to
== STACK_POINTER_REGNUM
&& need_fp
))
494 for (i
= HARD_REGNO_NREGS (eliminables
[j
].from
, Pmode
); i
--;)
495 SET_HARD_REG_BIT (never_use_colors
, eliminables
[j
].from
+ i
);
497 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
499 for (i
= HARD_REGNO_NREGS (HARD_FRAME_POINTER_REGNUM
, Pmode
); i
--;)
500 SET_HARD_REG_BIT (never_use_colors
, HARD_FRAME_POINTER_REGNUM
+ i
);
505 for (i
= HARD_REGNO_NREGS (FRAME_POINTER_REGNUM
, Pmode
); i
--;)
506 SET_HARD_REG_BIT (never_use_colors
, FRAME_POINTER_REGNUM
+ i
);
509 /* Stack and argument pointer are also rather useless to us. */
510 for (i
= HARD_REGNO_NREGS (STACK_POINTER_REGNUM
, Pmode
); i
--;)
511 SET_HARD_REG_BIT (never_use_colors
, STACK_POINTER_REGNUM
+ i
);
513 for (i
= HARD_REGNO_NREGS (ARG_POINTER_REGNUM
, Pmode
); i
--;)
514 SET_HARD_REG_BIT (never_use_colors
, ARG_POINTER_REGNUM
+ i
);
516 for (i
= 0; i
< 256; i
++)
518 unsigned char byte
= ((unsigned) i
) & 0xFF;
519 unsigned char count
= 0;
526 byte2bitcount
[i
] = count
;
529 for (i
= 0; i
< N_REG_CLASSES
; i
++)
532 COPY_HARD_REG_SET (rs
, reg_class_contents
[i
]);
533 AND_COMPL_HARD_REG_SET (rs
, never_use_colors
);
534 size
= hard_regs_count (rs
);
535 num_free_regs
[i
] = size
;
536 COPY_HARD_REG_SET (usable_regs
[i
], rs
);
539 /* Setup hardregs_for_mode[].
540 We are not interested only in the beginning of a multi-reg, but in
541 all the hardregs involved. Maybe HARD_REGNO_MODE_OK() only ok's
543 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
546 CLEAR_HARD_REG_SET (rs
);
547 for (reg
= 0; reg
< FIRST_PSEUDO_REGISTER
; reg
++)
548 if (HARD_REGNO_MODE_OK (reg
, i
)
549 /* Ignore VOIDmode and similar things. */
550 && (size
= HARD_REGNO_NREGS (reg
, i
)) != 0
551 && (reg
+ size
) <= FIRST_PSEUDO_REGISTER
)
554 SET_HARD_REG_BIT (rs
, reg
+ size
);
556 COPY_HARD_REG_SET (hardregs_for_mode
[i
], rs
);
559 CLEAR_HARD_REG_SET (invalid_mode_change_regs
);
560 #ifdef CANNOT_CHANGE_MODE_CLASS
562 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
564 enum machine_mode from
= (enum machine_mode
) i
;
565 enum machine_mode to
;
566 for (to
= VOIDmode
; to
< MAX_MACHINE_MODE
; ++to
)
569 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
570 if (REG_CANNOT_CHANGE_MODE_P (from
, to
, r
))
571 SET_HARD_REG_BIT (invalid_mode_change_regs
, r
);
576 for (an_unusable_color
= 0; an_unusable_color
< FIRST_PSEUDO_REGISTER
;
578 if (TEST_HARD_REG_BIT (never_use_colors
, an_unusable_color
))
580 if (an_unusable_color
== FIRST_PSEUDO_REGISTER
)
583 orig_max_uid
= get_max_uid ();
584 compute_bb_for_insn ();
585 ra_reg_renumber
= NULL
;
586 insns_with_deaths
= sbitmap_alloc (orig_max_uid
);
587 death_insns_max_uid
= orig_max_uid
;
588 sbitmap_ones (insns_with_deaths
);
589 gcc_obstack_init (&ra_obstack
);
/* check_df: consistency check of the DF structure; aborts when the
   invariants we rely on are violated.  The IDs of NULL slots in the
   ID->ref arrays (left behind by df.c updates) are collected in
   empty_defs/empty_uses; then every per-insn chain and every
   per-regno chain is checked to contain only non-NULL refs with live
   IDs, each at most once (duplicates tracked via bitmap b).
   NOTE(review): truncated extraction -- the function header, the
   abort() calls, the clearing of b between chains and the final
   free of b are not visible here.  */
592 /* Check the consistency of DF. This aborts if it violates some
593 invariants we expect. */
599 struct df_link
*link
;
603 bitmap b
= BITMAP_XMALLOC ();
604 bitmap empty_defs
= BITMAP_XMALLOC ();
605 bitmap empty_uses
= BITMAP_XMALLOC ();
607 /* Collect all the IDs of NULL references in the ID->REF arrays,
608 as df.c leaves them when updating the df structure. */
609 for (ui
= 0; ui
< df
->def_id
; ui
++)
611 bitmap_set_bit (empty_defs
, ui
);
612 for (ui
= 0; ui
< df
->use_id
; ui
++)
614 bitmap_set_bit (empty_uses
, ui
);
616 /* For each insn we check if the chain of references contain each
617 ref only once, doesn't contain NULL refs, or refs whose ID is invalid
618 (i.e. the df->refs[id] element is NULL). */
619 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
623 for (link
= DF_INSN_DEFS (df
, insn
); link
; link
= link
->next
)
624 if (!link
->ref
|| bitmap_bit_p (empty_defs
, DF_REF_ID (link
->ref
))
625 || bitmap_bit_p (b
, DF_REF_ID (link
->ref
)))
628 bitmap_set_bit (b
, DF_REF_ID (link
->ref
));
631 for (link
= DF_INSN_USES (df
, insn
); link
; link
= link
->next
)
632 if (!link
->ref
|| bitmap_bit_p (empty_uses
, DF_REF_ID (link
->ref
))
633 || bitmap_bit_p (b
, DF_REF_ID (link
->ref
)))
636 bitmap_set_bit (b
, DF_REF_ID (link
->ref
));
639 /* Now the same for the chains per register number. */
640 for (regno
= 0; regno
< max_reg_num (); regno
++)
643 for (link
= df
->regs
[regno
].defs
; link
; link
= link
->next
)
644 if (!link
->ref
|| bitmap_bit_p (empty_defs
, DF_REF_ID (link
->ref
))
645 || bitmap_bit_p (b
, DF_REF_ID (link
->ref
)))
648 bitmap_set_bit (b
, DF_REF_ID (link
->ref
));
651 for (link
= df
->regs
[regno
].uses
; link
; link
= link
->next
)
652 if (!link
->ref
|| bitmap_bit_p (empty_uses
, DF_REF_ID (link
->ref
))
653 || bitmap_bit_p (b
, DF_REF_ID (link
->ref
)))
656 bitmap_set_bit (b
, DF_REF_ID (link
->ref
));
659 BITMAP_XFREE (empty_uses
);
660 BITMAP_XFREE (empty_defs
);
/* reg_alloc: main entry point of the graph-coloring allocator.
   Visible flow in this fragment: re-add the return-value USE insn if
   it vanished, pick a debug preset, run regclass (silenced unless
   DUMP_REGCLASS), drop stale death notes, initialize globals and the
   default tunables (optimistic coalescing + IR spilling, no biased
   coloring), then loop df_analyse/alloc_mem/one_pass until no more
   spill code is added -- re-running regclass/reg_scan_update over the
   new stack pseudos after each spilling pass.  Afterwards: fix up
   abnormal edges, re-run life analysis and cleanup_cfg, set up
   reg_renumber and the reg_equiv_memory_loc array for reload, and run
   one last regclass pass.
   NOTE(review): truncated extraction -- the function header, the
   switch statement around the debug presets, loop braces, the
   emit-changes/cleanup calls and much of the ordering detail are not
   visible here; consult the original file before editing.  */
664 /* Main register allocator entry point. */
670 FILE *ra_dump_file
= rtl_dump_file
;
671 rtx last
= get_last_insn ();
674 last
= prev_real_insn (last
);
675 /* If this is an empty function we shouldn't do all the following,
676 but instead just setup what's necessary, and return. */
678 /* We currently rely on the existence of the return value USE as
679 one of the last insns. Add it if it's not there anymore. */
683 for (e
= EXIT_BLOCK_PTR
->pred
; e
; e
= e
->pred_next
)
685 basic_block bb
= e
->src
;
687 if (!INSN_P (last
) || GET_CODE (PATTERN (last
)) != USE
)
691 use_return_register ();
692 insns
= get_insns ();
694 emit_insn_after (insns
, last
);
699 /* Setup debugging levels. */
702 /* Some useful presets of the debug level, I often use. */
703 case 0: debug_new_regalloc
= DUMP_EVER
; break;
704 case 1: debug_new_regalloc
= DUMP_COSTS
; break;
705 case 2: debug_new_regalloc
= DUMP_IGRAPH_M
; break;
706 case 3: debug_new_regalloc
= DUMP_COLORIZE
+ DUMP_COSTS
; break;
707 case 4: debug_new_regalloc
= DUMP_COLORIZE
+ DUMP_COSTS
+ DUMP_WEBS
;
709 case 5: debug_new_regalloc
= DUMP_FINAL_RTL
+ DUMP_COSTS
+
712 case 6: debug_new_regalloc
= DUMP_VALIDIFY
; break;
715 debug_new_regalloc
= 0;
717 /* Run regclass first, so we know the preferred and alternate classes
718 for each pseudo. Deactivate emitting of debug info, if it's not
719 explicitly requested. */
720 if ((debug_new_regalloc
& DUMP_REGCLASS
) == 0)
721 rtl_dump_file
= NULL
;
722 regclass (get_insns (), max_reg_num (), rtl_dump_file
);
723 rtl_dump_file
= ra_dump_file
;
725 /* We don't use those NOTEs, and as we anyway change all registers,
726 they only make problems later. */
727 count_or_remove_death_notes (NULL
, 1);
729 /* Initialize the different global arrays and regsets. */
732 /* And some global variables. */
735 max_normal_pseudo
= (unsigned) max_reg_num ();
741 last_check_uses
= NULL
;
743 WEBS(INITIAL
) = NULL
;
745 memset (hardreg2web
, 0, sizeof (hardreg2web
));
746 ticks_build
= ticks_rebuild
= 0;
748 /* The default is to use optimistic coalescing with interference
749 region spilling, without biased coloring. */
751 flag_ra_spill_every_use
= 0;
752 flag_ra_improved_spilling
= 1;
753 flag_ra_ir_spilling
= 1;
754 flag_ra_break_aliases
= 0;
755 flag_ra_optimistic_coalescing
= 1;
756 flag_ra_merge_spill_costs
= 1;
757 if (flag_ra_optimistic_coalescing
)
758 flag_ra_break_aliases
= 1;
759 flag_ra_dump_notes
= 0;
761 /* Allocate the global df structure. */
764 /* This is the main loop, calling one_pass as long as there are still
765 some spilled webs. */
768 ra_debug_msg (DUMP_NEARLY_EVER
, "RegAlloc Pass %d\n\n", ra_pass
);
770 internal_error ("Didn't find a coloring.\n");
772 /* First collect all the register refs and put them into
773 chains per insn, and per regno. In later passes only update
774 that info from the new and modified insns. */
775 df_analyse (df
, (ra_pass
== 1) ? 0 : (bitmap
) -1,
776 DF_HARD_REGS
| DF_RD_CHAIN
| DF_RU_CHAIN
| DF_FOR_REGALLOC
);
778 if ((debug_new_regalloc
& DUMP_DF
) != 0)
781 df_dump (df
, DF_HARD_REGS
, rtl_dump_file
);
782 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
784 df_insn_debug_regno (df
, insn
, rtl_dump_file
);
788 /* Now allocate the memory needed for this pass, or (if it's not the
789 first pass), reallocate only additional memory. */
792 /* Build and colorize the interference graph, and possibly emit
793 spill insns. This also might delete certain move insns. */
794 changed
= one_pass (df
, ra_pass
> 1);
796 /* If that produced no changes, the graph was colorizable. */
799 /* Change the insns to refer to the new pseudos (one per web). */
801 /* Already setup a preliminary reg_renumber[] array, but don't
802 free our own version. reg_renumber[] will again be destroyed
803 later. We right now need it in dump_constraints() for
804 constrain_operands(1) whose subproc sometimes reference
805 it (because we are checking strictly, i.e. as if
808 /* Delete some more of the coalesced moves. */
814 /* If there were changes, this means spill code was added,
815 therefore repeat some things, including some initialization
816 of global data structures. */
817 if ((debug_new_regalloc
& DUMP_REGCLASS
) == 0)
818 rtl_dump_file
= NULL
;
819 /* We have new pseudos (the stackwebs). */
820 allocate_reg_info (max_reg_num (), FALSE
, FALSE
);
822 compute_bb_for_insn ();
823 /* Some of them might be dead. */
824 delete_trivially_dead_insns (get_insns (), max_reg_num ());
825 /* Those new pseudos need to have their REFS count set. */
826 reg_scan_update (get_insns (), NULL
, max_regno
);
827 max_regno
= max_reg_num ();
828 /* And they need useful classes too. */
829 regclass (get_insns (), max_reg_num (), rtl_dump_file
);
830 rtl_dump_file
= ra_dump_file
;
832 /* Remember the number of defs and uses, so we can distinguish
833 new from old refs in the next pass. */
834 last_def_id
= df
->def_id
;
835 last_use_id
= df
->use_id
;
838 /* Output the graph, and possibly the current insn sequence. */
840 if (changed
&& (debug_new_regalloc
& DUMP_RTL
) != 0)
842 ra_print_rtl_with_bb (rtl_dump_file
, get_insns ());
843 fflush (rtl_dump_file
);
846 /* Reset the web lists. */
852 /* We are done with allocation, free all memory and output some
856 if ((debug_new_regalloc
& DUMP_RESULTS
) == 0)
857 dump_cost (DUMP_COSTS
);
858 ra_debug_msg (DUMP_COSTS
, "ticks for build-phase: %ld\n", ticks_build
);
859 ra_debug_msg (DUMP_COSTS
, "ticks for rebuild-phase: %ld\n", ticks_rebuild
);
860 if ((debug_new_regalloc
& (DUMP_FINAL_RTL
| DUMP_RTL
)) != 0)
861 ra_print_rtl_with_bb (rtl_dump_file
, get_insns ());
863 /* We might have new pseudos, so allocate the info arrays for them. */
864 if ((debug_new_regalloc
& DUMP_SM
) == 0)
865 rtl_dump_file
= NULL
;
867 allocate_reg_info (max_reg_num (), FALSE
, FALSE
);
869 rtl_dump_file
= ra_dump_file
;
871 /* Some spill insns could've been inserted after trapping calls, i.e.
872 at the end of a basic block, which really ends at that call.
873 Fixup that breakages by adjusting basic block boundaries. */
874 fixup_abnormal_edges ();
876 /* Cleanup the flow graph. */
877 if ((debug_new_regalloc
& DUMP_LAST_FLOW
) == 0)
878 rtl_dump_file
= NULL
;
879 life_analysis (get_insns (), rtl_dump_file
,
880 PROP_DEATH_NOTES
| PROP_LOG_LINKS
| PROP_REG_INFO
);
881 cleanup_cfg (CLEANUP_EXPENSIVE
);
882 recompute_reg_usage (get_insns (), TRUE
);
884 dump_flow_info (rtl_dump_file
);
885 rtl_dump_file
= ra_dump_file
;
887 /* update_equiv_regs() can't be called after register allocation.
888 It might delete some pseudos, and insert other insns setting
889 up those pseudos in different places. This of course screws up
890 the allocation because that may destroy a hardreg for another
892 XXX we probably should do something like that on our own. I.e.
893 creating REG_EQUIV notes. */
894 /*update_equiv_regs ();*/
896 /* Setup the reg_renumber[] array for reload. */
898 sbitmap_free (insns_with_deaths
);
900 /* Remove REG_DEAD notes which are incorrectly set. See the docu
902 remove_suspicious_death_notes ();
904 if ((debug_new_regalloc
& DUMP_LAST_RTL
) != 0)
905 ra_print_rtl_with_bb (rtl_dump_file
, get_insns ());
906 dump_static_insn_cost (rtl_dump_file
,
907 "after allocation/spilling, before reload", NULL
);
909 /* Allocate the reg_equiv_memory_loc array for reload. */
910 reg_equiv_memory_loc
= (rtx
*) xcalloc (max_regno
, sizeof (rtx
));
911 /* And possibly initialize it. */
912 allocate_initial_values (reg_equiv_memory_loc
);
913 /* And one last regclass pass just before reload. */
914 regclass (get_insns (), max_reg_num (), rtl_dump_file
);
918 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: