2007-10-09 Vladimir Makarov <vmakarov@redhat.com>
gcc/ira.c
1 /* Integrated Register Allocator entry point.
2 Copyright (C) 2006, 2007
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
23 /* The integrated register allocator (IRA) is called integrated
24 because register coalescing and register live range splitting are
25 done on-the-fly during coloring. Register coalescing is done by
26 hard register preferencing during hard register assignment. The
27 live range splitting is a byproduct of the regional register
28 allocation.
30 The regional allocation is a top-down process. First we do
31 allocation for the whole function, then we improve it for loops,
32 then their subloops, and so on. To reduce register shuffling, the
33 same mechanism of hard register preferencing is used. This approach
34 works as well as the Callahan-Koblenz algorithm but is simpler.
35 We use Chaitin-Briggs coloring for each loop (or the function) with
36 optional biased coloring. If pseudo-registers get different
37 locations on loop borders, we rename them inside the loop and
38 generate pseudo-register move insns. Some optimizations (like
39 removing redundant stores, moving register shuffling to less
40 frequent points, and reducing code duplication) are done to
41 minimize the effect of register shuffling.
43 If we don't improve register allocation for loops, we get classic
44 Chaitin-Briggs coloring (only instead of a separate coalescing
45 pass, we use hard register preferencing).
47 Optionally we implement Chow's priority coloring for the whole
48 function only. It is quite analogous to the current gcc global
49 register allocator, except that we use more sophisticated hard
50 register preferencing.
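   As a rough illustration only (not the actual code), the top-down
   scheme can be thought of as a recursive walk of the loop tree:

     color_region (region):
       color the allocnos of REGION by Chaitin-Briggs, preferring the
         hard registers already chosen in the enclosing region;
       for each immediate subloop of REGION:
         color_region (subloop);

   where a pseudo-register that ends up in different locations on a
   loop border is renamed inside the loop and connected to the outer
   allocation by move insns.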
52 The following literature is worth reading for a better understanding of the code:
54 o Preston Briggs, Keith D. Cooper, Linda Torczon. Improvements to
55 Graph Coloring Register Allocation.
57 o David Callahan, Brian Koblenz. Register allocation via
58 hierarchical graph coloring
60 o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph
61 Coloring Register Allocation: A Study of the Chaitin-Briggs and
62 Callahan-Koblenz Algorithms.
64 o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global
65 Register Allocation Based on Graph Fusion.
70 #include "config.h"
71 #include "system.h"
72 #include "coretypes.h"
73 #include "tm.h"
74 #include "regs.h"
75 #include "rtl.h"
76 #include "tm_p.h"
77 #include "target.h"
78 #include "flags.h"
79 #include "obstack.h"
80 #include "bitmap.h"
81 #include "hard-reg-set.h"
82 #include "basic-block.h"
83 #include "expr.h"
84 #include "recog.h"
85 #include "params.h"
86 #include "timevar.h"
87 #include "tree-pass.h"
88 #include "output.h"
89 #include "reload.h"
90 #include "errors.h"
91 #include "integrate.h"
92 #include "df.h"
93 #include "ggc.h"
94 #include "ira-int.h"
96 static void setup_inner_mode (void);
97 static void setup_reg_mode_hard_regset (void);
98 static void setup_class_subset_and_move_costs (void);
99 static void setup_reg_class_intersect (void);
100 static void setup_class_hard_regs (void);
101 static void setup_available_class_regs (void);
102 static void setup_alloc_regs (int);
103 static void setup_reg_subclasses (void);
104 #ifdef IRA_COVER_CLASSES
105 static void setup_cover_classes (void);
106 static void setup_class_translate (void);
107 #endif
108 static void print_class_cover (FILE *);
109 static void find_reg_class_closure (void);
110 static void setup_reg_class_nregs (void);
111 static void setup_prohibited_class_mode_regs (void);
112 static int insn_contains_asm_1 (rtx *, void *);
113 static int insn_contains_asm (rtx);
114 static void compute_regs_asm_clobbered (char *);
115 static void setup_eliminable_regset (void);
116 static void find_reg_equiv_invariant_const (void);
117 static void setup_reg_renumber (int, int);
118 static int setup_allocno_assignment_from_reg_renumber (void);
119 static void calculate_allocation_cost (void);
120 #ifdef ENABLE_IRA_CHECKING
121 static void check_allocation (void);
122 #endif
123 static void fix_reg_equiv_init (void);
124 #ifdef ENABLE_IRA_CHECKING
125 static void print_redundant_copies (void);
126 #endif
127 static void setup_preferred_alternate_classes (void);
128 static void expand_reg_info (int);
130 static bool gate_ira (void);
131 static unsigned int rest_of_handle_ira (void);
133 /* Dump file for IRA. */
134 FILE *ira_dump_file;
136 /* The number of elements in the following array. */
137 int spilled_reg_stack_slots_num;
139 /* The following array contains descriptions of the spilled register
140 stack slots used in the current function so far. */
141 struct spilled_reg_stack_slot *spilled_reg_stack_slots;
143 /* The following variables are, respectively, the overall cost of the
144 allocation, the cost of hard register usage for the allocnos, the
145 cost of memory usage for the allocnos, and the cost of loads, stores
146 and register move insns generated for register live range splitting. */
147 int overall_cost;
148 int reg_cost, mem_cost;
149 int load_cost, store_cost, shuffle_cost;
150 int move_loops_num, additional_jumps_num;
152 /* Map: machine mode -> the mode whose values are immediately
153 contained in values of the given mode. */
154 unsigned char mode_inner_mode [NUM_MACHINE_MODES];
156 /* The following array is a map: hard regs x modes -> the set of hard
157 registers occupied by a value of the given mode starting with the
158 given hard register. */
159 HARD_REG_SET reg_mode_hard_regset [FIRST_PSEUDO_REGISTER] [NUM_MACHINE_MODES];
161 /* The following two arrays are the array analogs of the macros
162 MEMORY_MOVE_COST and REGISTER_MOVE_COST. */
163 int memory_move_cost [MAX_MACHINE_MODE] [N_REG_CLASSES] [2];
164 int register_move_cost [MAX_MACHINE_MODE] [N_REG_CLASSES] [N_REG_CLASSES];
166 /* A nonzero element of the following array means that the first
167 class is a subset of the second class. */
168 int class_subset_p [N_REG_CLASSES] [N_REG_CLASSES];
170 /* The biggest class contained in the intersection of the two classes. */
171 enum reg_class reg_class_subintersect [N_REG_CLASSES] [N_REG_CLASSES];
173 /* Temporary hard reg set used for various calculations. */
174 static HARD_REG_SET temp_hard_regset;
178 /* The function sets up mode_inner_mode array. */
179 static void
180 setup_inner_mode (void)
182 int i;
183 enum machine_mode wider;
185 for (i = 0; i < NUM_MACHINE_MODES; i++)
186 mode_inner_mode [i] = VOIDmode;
187 for (i = 0; i < NUM_MACHINE_MODES; i++)
189 wider = GET_MODE_WIDER_MODE (i);
190 if (wider != VOIDmode)
192 ira_assert (mode_inner_mode [wider] == VOIDmode);
193 mode_inner_mode [wider] = i;
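/* An illustration of the map above (target-dependent, not part of the
   code): on a target where GET_MODE_WIDER_MODE (SImode) is DImode,
   the loop gives mode_inner_mode [DImode] == SImode, i.e. the mode
   whose values are immediately contained in a DImode value. */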
200 /* The function sets up map REG_MODE_HARD_REGSET. */
201 static void
202 setup_reg_mode_hard_regset (void)
204 int i, m, hard_regno;
206 for (m = 0; m < NUM_MACHINE_MODES; m++)
207 for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
209 CLEAR_HARD_REG_SET (reg_mode_hard_regset [hard_regno] [m]);
210 for (i = hard_regno_nregs [hard_regno] [m] - 1; i >= 0; i--)
211 if (hard_regno + i < FIRST_PSEUDO_REGISTER)
212 SET_HARD_REG_BIT (reg_mode_hard_regset [hard_regno] [m],
213 hard_regno + i);
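/* An illustration (assuming a target where hard_regno_nregs [R]
   [DFmode] is 2 for some hard register R): the loop above puts both R
   and R + 1 into reg_mode_hard_regset [R] [DFmode], the set of hard
   registers occupied by a DFmode value starting at R. */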
219 /* The function sets up MEMORY_MOVE_COST, REGISTER_MOVE_COST and
220 CLASS_SUBSET_P. */
221 static void
222 setup_class_subset_and_move_costs (void)
224 int cl, cl2;
225 enum machine_mode mode;
227 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
229 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
231 memory_move_cost [mode] [cl] [0] = MEMORY_MOVE_COST (mode, cl, 0);
232 memory_move_cost [mode] [cl] [1] = MEMORY_MOVE_COST (mode, cl, 1);
235 for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
237 if (cl != NO_REGS && cl2 != NO_REGS)
238 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
239 register_move_cost [mode] [cl] [cl2]
240 = REGISTER_MOVE_COST (mode, cl, cl2);
241 class_subset_p [cl] [cl2]
242 = hard_reg_set_subset_p (reg_class_contents[cl],
243 reg_class_contents[cl2]);
248 /* The function sets up REG_CLASS_SUBINTERSECT. */
249 static void
250 setup_reg_class_intersect (void)
252 int cl1, cl2, cl3;
253 HARD_REG_SET temp_set;
255 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
257 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
259 reg_class_subintersect [cl1] [cl2] = NO_REGS;
260 COPY_HARD_REG_SET (temp_set, reg_class_contents [cl1]);
261 AND_HARD_REG_SET (temp_set, reg_class_contents [cl2]);
262 for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
263 if (hard_reg_set_subset_p (reg_class_contents [cl3], temp_set)
264 && ! hard_reg_set_subset_p (reg_class_contents [cl3],
265 reg_class_contents
266 [(int) reg_class_subintersect
267 [cl1] [cl2]]))
268 reg_class_subintersect [cl1] [cl2] = (enum reg_class) cl3;
275 /* Hard registers which can not be used by the register allocator in
276 any function of the current compilation unit. */
277 static HARD_REG_SET no_unit_alloc_regs;
279 /* Hard registers which can be used for the allocation of given
280 register class. The order is defined by the allocation order. */
281 short class_hard_regs [N_REG_CLASSES] [FIRST_PSEUDO_REGISTER];
283 /* The number of elements in the above array for a given register class. */
284 int class_hard_regs_num [N_REG_CLASSES];
286 /* Index (in class_hard_regs) of the given hard register for the
287 given register class (in the general case a hard register can
288 belong to several register classes). */
289 short class_hard_reg_index [N_REG_CLASSES] [FIRST_PSEUDO_REGISTER];
291 /* The function sets up the three arrays declared above. */
292 static void
293 setup_class_hard_regs (void)
295 int cl, i, hard_regno, n;
296 HARD_REG_SET processed_hard_reg_set;
298 ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
299 /* We could call ORDER_REGS_FOR_LOCAL_ALLOC here (it usually puts
300 callee-used hard registers first). But our heuristics work
301 better. */
302 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
304 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [cl]);
305 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
306 CLEAR_HARD_REG_SET (processed_hard_reg_set);
307 for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
309 #ifdef REG_ALLOC_ORDER
310 hard_regno = reg_alloc_order [i];
311 #else
312 hard_regno = i;
313 #endif
314 if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
315 continue;
316 SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
317 if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
318 class_hard_reg_index [cl] [hard_regno] = -1;
319 else
321 class_hard_reg_index [cl] [hard_regno] = n;
322 class_hard_regs [cl] [n++] = hard_regno;
325 class_hard_regs_num [cl] = n;
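/* After the loop above the two arrays are consistent: for every hard
   register R allocatable in class CL we have

     class_hard_regs [CL] [class_hard_reg_index [CL] [R]] == R

   while class_hard_reg_index [CL] [R] is -1 for the hard registers
   that are not available for the allocation in CL. */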
329 /* Number of hard registers of the given class available for
330 register allocation. */
331 int available_class_regs [N_REG_CLASSES];
333 /* Function setting up AVAILABLE_CLASS_REGS. */
334 static void
335 setup_available_class_regs (void)
337 int i, j;
339 memset (available_class_regs, 0, sizeof (available_class_regs));
340 for (i = 0; i < N_REG_CLASSES; i++)
342 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [i]);
343 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
344 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
345 if (TEST_HARD_REG_BIT (temp_hard_regset, j))
346 available_class_regs [i]++;
350 /* The function sets up different global variables defining hard
351 registers available for the allocation. It depends on
352 USE_HARD_FRAME_P whose nonzero value means that we can use the hard
353 frame pointer for the allocation. */
354 static void
355 setup_alloc_regs (int use_hard_frame_p)
357 COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_reg_set);
358 if (! use_hard_frame_p)
359 SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
360 setup_class_hard_regs ();
361 setup_available_class_regs ();
366 /* Define the following macro if allocation through malloc is
367 preferable. */
368 /*#define IRA_NO_OBSTACK*/
370 #ifndef IRA_NO_OBSTACK
371 /* Obstack used for storing all dynamic data (except bitmaps) of the
372 IRA. */
373 static struct obstack ira_obstack;
374 #endif
376 /* Obstack used for storing all bitmaps of the IRA. */
377 static struct bitmap_obstack ira_bitmap_obstack;
379 /* The function allocates memory of size LEN for IRA data. */
380 void *
381 ira_allocate (size_t len)
383 void *res;
385 #ifndef IRA_NO_OBSTACK
386 res = obstack_alloc (&ira_obstack, len);
387 #else
388 res = xmalloc (len);
389 #endif
390 return res;
393 /* The function frees the memory ADDR allocated for IRA data. */
394 void
395 ira_free (void *addr ATTRIBUTE_UNUSED)
397 #ifndef IRA_NO_OBSTACK
398 /* do nothing */
399 #else
400 free (addr);
401 #endif
405 /* The function allocates bitmap for IRA. */
406 bitmap
407 ira_allocate_bitmap (void)
409 return BITMAP_ALLOC (&ira_bitmap_obstack);
412 /* The function frees bitmap B allocated for IRA. */
413 void
414 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
416 /* do nothing */
419 /* The function allocates regset for IRA. */
420 regset
421 ira_allocate_regset (void)
423 return ALLOC_REG_SET (&ira_bitmap_obstack);
426 /* The function frees regset R allocated for IRA. */
427 void
428 ira_free_regset (regset r ATTRIBUTE_UNUSED)
430 /* do nothing */
435 /* The function returns nonzero if the hard registers starting with
436 HARD_REGNO and containing a value of MODE are not in the set
437 HARD_REGSET. */
438 int
439 hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
440 HARD_REG_SET hard_regset)
442 int i;
444 ira_assert (hard_regno >= 0);
445 for (i = hard_regno_nregs [hard_regno] [mode] - 1; i >= 0; i--)
446 if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
447 return FALSE;
448 return TRUE;
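/* A typical use of the predicate above (setup_reg_renumber below is
   the real code; this is only a simplified sketch):

     if (! hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
                                  call_used_reg_set))
       caller_save_needed = 1;

   i.e. the allocno got a hard register overlapping the set of
   call-clobbered registers. */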
453 /* The function outputs information about allocation of all allocnos
454 into file F. */
455 void
456 print_disposition (FILE *f)
458 int i, n, max_regno;
459 allocno_t a;
460 basic_block bb;
462 fprintf (f, "Disposition:");
463 max_regno = max_reg_num ();
464 for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
465 for (a = regno_allocno_map [i]; a != NULL; a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
467 if (n % 4 == 0)
468 fprintf (f, "\n");
469 n++;
470 fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
471 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
472 fprintf (f, "b%-3d", bb->index);
473 else
474 fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
475 if (ALLOCNO_HARD_REGNO (a) >= 0)
476 fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
477 else
478 fprintf (f, " mem");
480 fprintf (f, "\n");
483 /* The function outputs information about allocation of all allocnos
484 into stderr. */
485 void
486 debug_disposition (void)
488 print_disposition (stderr);
493 /* For each reg class, a table listing all the classes contained in
494 it (excluding the class itself; fixed registers are excluded from
495 the consideration). */
496 static enum reg_class alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
498 /* The function initializes the tables of subclasses of each reg
499 class. */
500 static void
501 setup_reg_subclasses (void)
503 int i, j;
505 for (i = 0; i < N_REG_CLASSES; i++)
506 for (j = 0; j < N_REG_CLASSES; j++)
507 alloc_reg_class_subclasses [i] [j] = LIM_REG_CLASSES;
509 for (i = 0; i < N_REG_CLASSES; i++)
511 if (i == (int) NO_REGS)
512 continue;
514 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [i]);
515 AND_COMPL_HARD_REG_SET (temp_hard_regset, fixed_reg_set);
516 if (hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
517 continue;
518 for (j = 0; j < N_REG_CLASSES; j++)
519 if (i != j)
521 enum reg_class *p;
523 if (! hard_reg_set_subset_p (reg_class_contents [i],
524 reg_class_contents [j]))
525 continue;
526 p = &alloc_reg_class_subclasses [j] [0];
527 while (*p != LIM_REG_CLASSES) p++;
528 *p = (enum reg_class) i;
535 /* The value is the size of the subsequent array. */
536 int reg_class_cover_size;
538 /* The array containing cover classes whose hard registers are used
539 for the allocation -- see also comments for macro
540 IRA_COVER_CLASSES. */
541 enum reg_class reg_class_cover [N_REG_CLASSES];
543 /* The value is the number of elements in the subsequent array. */
544 int important_classes_num;
546 /* The array containing classes which are subclasses of a cover
547 class. */
548 enum reg_class important_classes [N_REG_CLASSES];
550 #ifdef IRA_COVER_CLASSES
552 /* The function checks IRA_COVER_CLASSES and sets the global
553 variables defined above. */
554 static void
555 setup_cover_classes (void)
557 int i, j;
558 enum reg_class cl;
559 static enum reg_class classes [] = IRA_COVER_CLASSES;
561 reg_class_cover_size = 0;
562 for (i = 0; (cl = classes [i]) != LIM_REG_CLASSES; i++)
564 for (j = 0; j < i; j++)
565 if (reg_classes_intersect_p (cl, classes [j]))
566 gcc_unreachable ();
567 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [cl]);
568 AND_COMPL_HARD_REG_SET (temp_hard_regset, fixed_reg_set);
569 if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
570 reg_class_cover [reg_class_cover_size++] = cl;
572 important_classes_num = 0;
573 for (cl = 0; cl < N_REG_CLASSES; cl++)
575 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [cl]);
576 AND_COMPL_HARD_REG_SET (temp_hard_regset, fixed_reg_set);
577 if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
578 for (j = 0; j < reg_class_cover_size; j++)
579 if (hard_reg_set_subset_p (reg_class_contents [cl],
580 reg_class_contents [reg_class_cover [j]]))
581 important_classes [important_classes_num++] = cl;
584 #endif
586 /* Map of register classes to the corresponding cover class containing
587 the given class. If the given class is not a subset of a cover
588 class, we translate it into the cheapest cover class intersecting it. */
589 enum reg_class class_translate [N_REG_CLASSES];
591 #ifdef IRA_COVER_CLASSES
593 /* The function sets up array CLASS_TRANSLATE. */
594 static void
595 setup_class_translate (void)
597 enum reg_class cl, cover_class, best_class, *cl_ptr;
598 enum machine_mode mode;
599 int i, cost, min_cost, best_cost;
601 for (cl = 0; cl < N_REG_CLASSES; cl++)
602 class_translate [cl] = NO_REGS;
603 for (i = 0; i < reg_class_cover_size; i++)
605 cover_class = reg_class_cover [i];
606 for (cl_ptr = &alloc_reg_class_subclasses [cover_class] [0];
607 (cl = *cl_ptr) != LIM_REG_CLASSES;
608 cl_ptr++)
610 if (class_translate [cl] == NO_REGS)
611 class_translate [cl] = cover_class;
612 #ifdef ENABLE_IRA_CHECKING
613 else
615 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents [cl]);
616 AND_COMPL_HARD_REG_SET (temp_hard_regset, fixed_reg_set);
617 if (! hard_reg_set_subset_p (temp_hard_regset,
618 zero_hard_reg_set))
619 gcc_unreachable ();
621 #endif
623 class_translate [cover_class] = cover_class;
625 /* For classes which are not fully covered by a cover class (in
626 other words, covered by more than one cover class), use the
627 cheapest cover class. */
628 for (cl = 0; cl < N_REG_CLASSES; cl++)
630 if (cl == NO_REGS || class_translate [cl] != NO_REGS)
631 continue;
632 best_class = NO_REGS;
633 best_cost = INT_MAX;
634 for (i = 0; i < reg_class_cover_size; i++)
636 cover_class = reg_class_cover [i];
637 COPY_HARD_REG_SET (temp_hard_regset,
638 reg_class_contents [cover_class]);
639 AND_HARD_REG_SET (temp_hard_regset, reg_class_contents [cl]);
640 if (! hard_reg_set_equal_p (temp_hard_regset, zero_hard_reg_set))
642 min_cost = INT_MAX;
643 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
645 cost = (memory_move_cost [mode] [cl] [0]
646 + memory_move_cost [mode] [cl] [1]);
647 if (min_cost > cost)
648 min_cost = cost;
650 if (best_class == NO_REGS || best_cost > min_cost)
652 best_class = cover_class;
653 best_cost = min_cost;
657 class_translate [cl] = best_class;
660 #endif
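/* An illustration of the translation map set up above: a class fully
   contained in one cover class is mapped to that cover class, NO_REGS
   stays NO_REGS, and a class contained in no cover class is mapped to
   one of the cover classes it intersects, as chosen by the loop
   above. */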
662 /* The function outputs all cover classes and the translation map into
663 file F. */
664 static void
665 print_class_cover (FILE *f)
667 static const char *const reg_class_names[] = REG_CLASS_NAMES;
668 int i;
670 fprintf (f, "Class cover:\n");
671 for (i = 0; i < reg_class_cover_size; i++)
672 fprintf (f, " %s", reg_class_names [reg_class_cover [i]]);
673 fprintf (f, "\nClass translation:\n");
674 for (i = 0; i < N_REG_CLASSES; i++)
675 fprintf (f, " %s -> %s\n", reg_class_names [i],
676 reg_class_names [class_translate [i]]);
679 /* The function outputs all cover classes and the translation map into
680 stderr. */
681 void
682 debug_class_cover (void)
684 print_class_cover (stderr);
687 /* Function setting up different arrays concerning class subsets and
688 cover classes. */
689 static void
690 find_reg_class_closure (void)
692 setup_reg_subclasses ();
693 #ifdef IRA_COVER_CLASSES
694 setup_cover_classes ();
695 setup_class_translate ();
696 #endif
701 /* Map: register class x machine mode -> number of hard registers of
702 the given class needed to store a value of the given mode. If the
703 number is different, the size will be negative. */
704 int reg_class_nregs [N_REG_CLASSES] [MAX_MACHINE_MODE];
706 /* Maximal value of the previous array elements. */
707 int max_nregs;
709 /* Function forming REG_CLASS_NREGS map. */
710 static void
711 setup_reg_class_nregs (void)
713 int m;
714 enum reg_class cl;
716 max_nregs = -1;
717 for (cl = 0; cl < N_REG_CLASSES; cl++)
718 for (m = 0; m < MAX_MACHINE_MODE; m++)
720 reg_class_nregs [cl] [m] = CLASS_MAX_NREGS (cl, m);
721 if (max_nregs < reg_class_nregs [cl] [m])
722 max_nregs = reg_class_nregs [cl] [m];
728 /* Array whose elements are the sets of hard registers of the given
729 register class for which HARD_REGNO_MODE_OK is zero for the given mode. */
730 HARD_REG_SET prohibited_class_mode_regs [N_REG_CLASSES] [NUM_MACHINE_MODES];
732 /* The function setting up PROHIBITED_CLASS_MODE_REGS. */
733 static void
734 setup_prohibited_class_mode_regs (void)
736 int i, j, k, hard_regno;
737 enum reg_class cl;
739 for (i = 0; i < reg_class_cover_size; i++)
741 cl = reg_class_cover [i];
742 for (j = 0; j < NUM_MACHINE_MODES; j++)
744 CLEAR_HARD_REG_SET (prohibited_class_mode_regs [cl] [j]);
745 for (k = class_hard_regs_num [cl] - 1; k >= 0; k--)
747 hard_regno = class_hard_regs [cl] [k];
748 if (! HARD_REGNO_MODE_OK (hard_regno, j))
749 SET_HARD_REG_BIT (prohibited_class_mode_regs [cl] [j],
750 hard_regno);
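/* An illustration (hypothetical target): if HARD_REGNO_MODE_OK
   rejects, say, odd-numbered registers of a cover class for a
   double-word mode, those registers end up in
   prohibited_class_mode_regs [class] [mode] and the coloring code can
   avoid them for values of that mode. */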
758 /* Hard regsets all of whose bits are correspondingly zero or one. */
759 HARD_REG_SET zero_hard_reg_set;
760 HARD_REG_SET one_hard_reg_set;
762 /* The function is called once during the compiler run. It sets up
763 arrays whose values don't depend on the function being compiled. */
764 void
765 init_ira_once (void)
767 CLEAR_HARD_REG_SET (zero_hard_reg_set);
768 SET_HARD_REG_SET (one_hard_reg_set);
769 setup_inner_mode ();
770 setup_reg_mode_hard_regset ();
771 setup_class_subset_and_move_costs ();
772 setup_reg_class_intersect ();
773 setup_alloc_regs (flag_omit_frame_pointer != 0);
774 find_reg_class_closure ();
775 setup_reg_class_nregs ();
776 setup_prohibited_class_mode_regs ();
777 init_ira_costs_once ();
782 /* Function-specific hard registers excluded from the allocation. */
783 HARD_REG_SET no_alloc_regs;
785 /* Return true if *LOC contains an asm. */
787 static int
788 insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
790 if ( !*loc)
791 return 0;
792 if (GET_CODE (*loc) == ASM_OPERANDS)
793 return 1;
794 return 0;
798 /* Return true if INSN contains an ASM. */
800 static int
801 insn_contains_asm (rtx insn)
803 return for_each_rtx (&insn, insn_contains_asm_1, NULL);
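/* for_each_rtx walks every sub-rtx of the insn and stops as soon as
   the callback (insn_contains_asm_1 above) returns nonzero, so the
   result is nonzero exactly when the insn contains an ASM_OPERANDS
   rtx. */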
806 /* Set up regs_asm_clobbered. */
807 static void
808 compute_regs_asm_clobbered (char *regs_asm_clobbered)
810 basic_block bb;
812 memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
814 FOR_EACH_BB (bb)
816 rtx insn;
817 FOR_BB_INSNS_REVERSE (bb, insn)
819 struct df_ref **def_rec;
821 if (insn_contains_asm (insn))
822 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
824 struct df_ref *def = *def_rec;
825 unsigned int dregno = DF_REF_REGNO (def);
826 if (dregno < FIRST_PSEUDO_REGISTER)
828 unsigned int i;
829 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (def));
830 unsigned int end = dregno
831 + hard_regno_nregs [dregno] [mode] - 1;
833 for (i = dregno; i <= end; ++i)
834 regs_asm_clobbered [i] = 1;
842 /* The function sets up ELIMINABLE_REGSET, NO_ALLOC_REGS, and
843 REGS_EVER_LIVE. */
844 static void
845 setup_eliminable_regset (void)
847 int i;
848 /* Like regs_ever_live, but 1 if a reg is set or clobbered from an
849 asm. Unlike regs_ever_live, elements of this array corresponding
850 to eliminable regs like the frame pointer are set if an asm sets
851 them. */
852 char *regs_asm_clobbered = alloca (FIRST_PSEUDO_REGISTER * sizeof (char));
853 #ifdef ELIMINABLE_REGS
854 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
855 #endif
856 int need_fp
857 = (! flag_omit_frame_pointer
858 || (current_function_calls_alloca && EXIT_IGNORE_STACK)
859 || FRAME_POINTER_REQUIRED);
861 COPY_HARD_REG_SET (no_alloc_regs, no_unit_alloc_regs);
862 CLEAR_HARD_REG_SET (eliminable_regset);
864 compute_regs_asm_clobbered (regs_asm_clobbered);
865 /* Build the regset of all eliminable registers and show we can't
866 use those that we already know won't be eliminated. */
867 #ifdef ELIMINABLE_REGS
868 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
870 bool cannot_elim
871 = (! CAN_ELIMINATE (eliminables [i].from, eliminables [i].to)
872 || (eliminables [i].to == STACK_POINTER_REGNUM && need_fp));
874 if (! regs_asm_clobbered [eliminables [i].from])
876 SET_HARD_REG_BIT (eliminable_regset, eliminables [i].from);
878 if (cannot_elim)
879 SET_HARD_REG_BIT (no_alloc_regs, eliminables[i].from);
881 else if (cannot_elim)
882 error ("%s cannot be used in asm here",
883 reg_names [eliminables [i].from]);
884 else
885 df_set_regs_ever_live (eliminables[i].from, true);
887 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
888 if (! regs_asm_clobbered [HARD_FRAME_POINTER_REGNUM])
890 SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
891 if (need_fp)
892 SET_HARD_REG_BIT (no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
894 else if (need_fp)
895 error ("%s cannot be used in asm here",
896 reg_names [HARD_FRAME_POINTER_REGNUM]);
897 else
898 df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
899 #endif
901 #else
902 if (! regs_asm_clobbered [FRAME_POINTER_REGNUM])
904 SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
905 if (need_fp)
906 SET_HARD_REG_BIT (no_alloc_regs, FRAME_POINTER_REGNUM);
908 else if (need_fp)
909 error ("%s cannot be used in asm here", reg_names [FRAME_POINTER_REGNUM]);
910 else
911 df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
912 #endif
917 /* The element value is nonzero if the value of the corresponding
918 regno is a function invariant. */
919 int *reg_equiv_invariant_p;
921 /* The element value is the equivalent constant of the corresponding regno, or NULL_RTX. */
922 rtx *reg_equiv_const;
924 /* The function sets up the two arrays declared above. */
925 static void
926 find_reg_equiv_invariant_const (void)
928 int i, invariant_p;
929 rtx list, insn, note, constant, x;
931 for (i = FIRST_PSEUDO_REGISTER; i < reg_equiv_init_size; i++)
933 constant = NULL_RTX;
934 invariant_p = FALSE;
935 for (list = reg_equiv_init [i]; list != NULL_RTX; list = XEXP (list, 1))
937 insn = XEXP (list, 0);
938 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
940 if (note == NULL_RTX)
941 continue;
943 x = XEXP (note, 0);
945 if (! function_invariant_p (x)
946 || ! flag_pic
947 /* A function invariant is often CONSTANT_P but may
948 include a register. We promise to only pass CONSTANT_P
949 objects to LEGITIMATE_PIC_OPERAND_P. */
950 || (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x)))
952 /* It can happen that a REG_EQUIV note contains a MEM that
953 is not a legitimate memory operand. As later stages of
954 reload assume that all addresses found in the
955 reg_equiv_* arrays were originally legitimate, we
956 ignore such REG_EQUIV notes. */
957 if (memory_operand (x, VOIDmode))
958 continue;
959 else if (function_invariant_p (x))
961 if (GET_CODE (x) == PLUS
962 || x == frame_pointer_rtx || x == arg_pointer_rtx)
963 invariant_p = TRUE;
964 else
965 constant = x;
969 reg_equiv_invariant_p [i] = invariant_p;
970 reg_equiv_const [i] = constant;
976 /* The function sets up REG_RENUMBER and CALLER_SAVE_NEEDED (used by
977 reload) from the allocation found by IRA. If AFTER_EMIT_P, the
978 function is called after emitting the move insns; otherwise, if
979 AFTER_CALL_P, the function is called right after splitting allocnos
980 around calls. */
981 static void
982 setup_reg_renumber (int after_emit_p, int after_call_p)
984 int i, regno, hard_regno;
985 allocno_t a;
987 caller_save_needed = 0;
988 for (i = 0; i < allocnos_num; i++)
990 a = allocnos [i];
991 if (ALLOCNO_CAP_MEMBER (a) != NULL)
992 /* It is a cap. */
993 continue;
994 if (! ALLOCNO_ASSIGNED_P (a))
995 ALLOCNO_ASSIGNED_P (a) = TRUE;
996 ira_assert (ALLOCNO_ASSIGNED_P (a));
997 hard_regno = ALLOCNO_HARD_REGNO (a);
998 regno = after_emit_p ? (int) REGNO (ALLOCNO_REG (a)) : ALLOCNO_REGNO (a);
999 reg_renumber [regno] = (hard_regno < 0 ? -1 : hard_regno);
1000 if (hard_regno >= 0 && ALLOCNO_CALLS_CROSSED_NUM (a) != 0
1001 && ! hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1002 call_used_reg_set))
1004 ira_assert
1005 ((! after_call_p && flag_caller_saves)
1006 || (flag_caller_saves && ! flag_ira_split_around_calls)
1007 || reg_equiv_const [regno] || reg_equiv_invariant_p [regno]);
1008 caller_save_needed = 1;
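/* In other words, the loop above requests caller saves as soon as
   some allocno crossing a call is assigned a hard register that
   overlaps call_used_reg_set; reload then acts on CALLER_SAVE_NEEDED
   together with REG_RENUMBER. */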
1013 /* The function sets up the allocno assignments from reg_renumber. If
1014 the hard register of an allocno does not belong to its cover class,
1015 return TRUE and leave the allocno unassigned. */
1016 static int
1017 setup_allocno_assignment_from_reg_renumber (void)
1019 int i, hard_regno;
1020 allocno_t a;
1021 int result = FALSE;
1023 for (i = 0; i < allocnos_num; i++)
1025 a = allocnos [i];
1026 hard_regno = ALLOCNO_HARD_REGNO (a) = reg_renumber [ALLOCNO_REGNO (a)];
1027 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1028 if (hard_regno >= 0
1029 && hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1030 reg_class_contents
1031 [ALLOCNO_COVER_CLASS (a)]))
1032 result = TRUE;
1033 else
1034 ALLOCNO_ASSIGNED_P (a) = TRUE;
1036 return result;
1039 /* The function evaluates overall allocation cost and costs for using
1040 registers and memory for allocnos. */
1042 static void
1043 calculate_allocation_cost (void)
1045 int i, hard_regno, cost;
1046 allocno_t a;
1048 overall_cost = reg_cost = mem_cost = 0;
1049 for (i = 0; i < allocnos_num; i++)
1051 a = allocnos [i];
1052 hard_regno = ALLOCNO_HARD_REGNO (a);
1053 ira_assert (hard_regno < 0
1054 || ! hard_reg_not_in_set_p
1055 (hard_regno, ALLOCNO_MODE (a),
1056 reg_class_contents [ALLOCNO_COVER_CLASS (a)]));
1057 if (hard_regno < 0)
1059 cost = ALLOCNO_MEMORY_COST (a);
1060 mem_cost += cost;
1062 else
1064 cost = (ALLOCNO_HARD_REG_COSTS (a)
1065 [class_hard_reg_index
1066 [ALLOCNO_COVER_CLASS (a)] [hard_regno]]);
1067 reg_cost += cost;
1069 overall_cost += cost;
1072 if (ira_dump_file != NULL)
1074 fprintf (ira_dump_file,
1075 "+++Costs: overall %d, reg %d, mem %d, ld %d, st %d, move %d\n",
1076 overall_cost, reg_cost, mem_cost,
1077 load_cost, store_cost, shuffle_cost);
1078 fprintf (ira_dump_file, "+++ move loops %d, new jumps %d\n",
1079 move_loops_num, additional_jumps_num);
1084 #ifdef ENABLE_IRA_CHECKING
1085 /* The function checks correctness of the allocation. */
1086 static void
1087 check_allocation (void)
1089 allocno_t a, conflict_a, *allocno_vec;
1090 int i, hard_regno, conflict_hard_regno, j, nregs, conflict_nregs;
1092 for (i = 0; i < allocnos_num; i++)
1094 a = allocnos [i];
1095 if (ALLOCNO_CAP_MEMBER (a) != NULL
1096 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1097 continue;
1098 nregs = hard_regno_nregs [hard_regno] [ALLOCNO_MODE (a)];
1099 allocno_vec = ALLOCNO_CONFLICT_ALLOCNO_VEC (a);
1100 for (j = 0; (conflict_a = allocno_vec [j]) != NULL; j++)
1101 if ((conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a)) >= 0)
1103 conflict_nregs
1104 = (hard_regno_nregs
1105 [conflict_hard_regno] [ALLOCNO_MODE (conflict_a)]);
1106 if ((conflict_hard_regno <= hard_regno
1107 && hard_regno < conflict_hard_regno + conflict_nregs)
1108 || (hard_regno <= conflict_hard_regno
1109 && conflict_hard_regno < hard_regno + nregs))
1111 fprintf (stderr, "bad allocation for %d and %d\n",
1112 ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
1113 gcc_unreachable ();
1118 #endif
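/* The conflict test above is a plain range-intersection check: hard
   registers [H, H + NREGS) and [CH, CH + CNREGS) overlap iff
   CH <= H < CH + CNREGS or H <= CH < H + NREGS. For example, H = 4
   with NREGS = 2 (registers 4 and 5) overlaps CH = 5 with CNREGS = 1
   (register 5). */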
1120 /* The function fixes values of array REG_EQUIV_INIT after live range
1121 splitting done by IRA. */
1122 static void
1123 fix_reg_equiv_init (void)
1125 int max_regno = max_reg_num ();
1126 int i, new_regno;
1127 rtx x, prev, next, insn, set;
1130 if (reg_equiv_init_size < max_regno)
1132 reg_equiv_init = ggc_realloc (reg_equiv_init, max_regno * sizeof (rtx));
1133 while (reg_equiv_init_size < max_regno)
1134 reg_equiv_init [reg_equiv_init_size++] = NULL_RTX;
1135 for (i = FIRST_PSEUDO_REGISTER; i < reg_equiv_init_size; i++)
1136 for (prev = NULL_RTX, x = reg_equiv_init [i]; x != NULL_RTX; x = next)
1138 next = XEXP (x, 1);
1139 insn = XEXP (x, 0);
1140 set = single_set (insn);
1141 ira_assert (set != NULL_RTX
1142 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
1143 if (REG_P (SET_DEST (set))
1144 && ((int) REGNO (SET_DEST (set)) == i
1145 || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
1146 new_regno = REGNO (SET_DEST (set));
1147 else if (REG_P (SET_SRC (set))
1148 && ((int) REGNO (SET_SRC (set)) == i
1149 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
1150 new_regno = REGNO (SET_SRC (set));
1151 else
1152 gcc_unreachable ();
1153 if (new_regno == i)
1154 prev = x;
1155 else
1157 if (prev == NULL_RTX)
1158 reg_equiv_init [i] = next;
1159 else
1160 XEXP (prev, 1) = next;
1161 XEXP (x, 1) = reg_equiv_init [new_regno];
1162 reg_equiv_init [new_regno] = x;
1168 #ifdef ENABLE_IRA_CHECKING
1169 /* The function prints redundant memory-memory copies. */
1170 static void
1171 print_redundant_copies (void)
1173 int i, hard_regno;
1174 allocno_t a;
1175 copy_t cp, next_cp;
1177 for (i = 0; i < allocnos_num; i++)
1179 a = allocnos [i];
1180 if (ALLOCNO_CAP_MEMBER (a) != NULL)
1181 /* It is a cap. */
1182 continue;
1183 hard_regno = ALLOCNO_HARD_REGNO (a);
1184 if (hard_regno >= 0)
1185 continue;
1186 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1187 if (cp->first == a)
1188 next_cp = cp->next_first_allocno_copy;
1189 else
1191 next_cp = cp->next_second_allocno_copy;
1192 if (ira_dump_file != NULL && cp->move_insn != NULL_RTX
1193 && ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
1194 fprintf (ira_dump_file, "move %d(freq %d):%d\n",
1195 INSN_UID (cp->move_insn), cp->freq, hard_regno);
1199 #endif
1201 /* Set up the preferred and alternative classes for pseudo-registers
1202 for other passes. */
1203 static void
1204 setup_preferred_alternate_classes (void)
1206 int i;
1207 enum reg_class cover_class;
1208 allocno_t a;
1210 for (i = 0; i < allocnos_num; i++)
1212 a = allocnos [i];
1213 cover_class = ALLOCNO_COVER_CLASS (a);
1214 if (cover_class == NO_REGS)
1215 cover_class = GENERAL_REGS;
1216 setup_reg_classes (ALLOCNO_REGNO (a), cover_class, NO_REGS);
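/* The function expands the register info arrays (reg_renumber and the
   preferred/alternative class info) to cover the pseudo-registers
   created since OLD_SIZE, initializing the new entries. */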
1222 static void
1223 expand_reg_info (int old_size)
1225 int i;
1226 int size = max_reg_num ();
1228 resize_reg_info ();
1229 for (i = old_size; i < size; i++)
1231 reg_renumber [i] = -1;
1232 setup_reg_classes (i, GENERAL_REGS, ALL_REGS);
1238 /* The values of max_reg_num () correspondingly before the allocator
1239 and before splitting allocnos around calls. */
1240 int ira_max_regno_before;
1241 int ira_max_regno_call_before;
1243 /* Flags for each regno (existing before splitting allocnos around
1244 calls) saying whether the corresponding register crosses a call. */
1245 char *original_regno_call_crossed_p;
1247 /* This is the main entry of IRA. */
1248 void
1249 ira (FILE *f)
1251 int i, overall_cost_before, loops_p, allocated_size;
1252 int rebuild_p;
1253 allocno_t a;
1255 ira_dump_file = f;
1257 df_note_add_problem ();
1259 if (optimize > 1)
1260 df_remove_problem (df_live);
1261 /* Create a new version of df that has the special version of UR if
1262 we are doing optimization. */
1263 if (optimize)
1264 df_urec_add_problem ();
1265 df_analyze ();
1267 df_clear_flags (DF_NO_INSN_RESCAN);
1269 regstat_init_n_sets_and_refs ();
1270 regstat_compute_ri ();
1271 rebuild_p = update_equiv_regs ();
1272 regstat_free_n_sets_and_refs ();
1273 regstat_free_ri ();
1275 #ifndef IRA_NO_OBSTACK
1276 gcc_obstack_init (&ira_obstack);
1277 #endif
1278 bitmap_obstack_initialize (&ira_bitmap_obstack);
1280 ira_max_regno_call_before = ira_max_regno_before = max_reg_num ();
1281 reg_equiv_invariant_p = ira_allocate (max_reg_num () * sizeof (int));
1282 memset (reg_equiv_invariant_p, 0, max_reg_num () * sizeof (int));
1283 reg_equiv_const = ira_allocate (max_reg_num () * sizeof (rtx));
1284 memset (reg_equiv_const, 0, max_reg_num () * sizeof (rtx));
1285 find_reg_equiv_invariant_const ();
1286 if (rebuild_p)
1288 timevar_push (TV_JUMP);
1289 rebuild_jump_labels (get_insns ());
1290 purge_all_dead_edges ();
1291 timevar_pop (TV_JUMP);
1293 allocated_size = max_reg_num ();
1294 allocate_reg_info ();
1295 setup_eliminable_regset ();
1297 if (optimize)
1298 df_remove_problem (df_urec);
1300 overall_cost = reg_cost = mem_cost = 0;
1301 load_cost = store_cost = shuffle_cost = 0;
1302 move_loops_num = additional_jumps_num = 0;
1303 loops_p = ira_build (flag_ira_algorithm == IRA_ALGORITHM_REGIONAL
1304 || flag_ira_algorithm == IRA_ALGORITHM_MIXED);
1305 ira_color ();
1307 ira_emit ();
1309 max_regno = max_reg_num ();
1311 expand_reg_info (allocated_size);
1312 allocated_size = max_regno;
1314 setup_reg_renumber (TRUE, FALSE);
1316 if (loops_p)
1318 /* Even if new registers are not created, rebuild the IRA internal
1319 representation to use the correct regno allocno map. */
1320 ira_destroy ();
1321 ira_build (FALSE);
1322 if (setup_allocno_assignment_from_reg_renumber ())
1324 reassign_conflict_allocnos (max_regno, FALSE);
1325 setup_reg_renumber (FALSE, FALSE);
1329 original_regno_call_crossed_p = ira_allocate (max_regno * sizeof (char));
1331 for (i = 0; i < allocnos_num; i++)
1333 a = allocnos [i];
1334 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1335 original_regno_call_crossed_p [ALLOCNO_REGNO (a)]
1336 = ALLOCNO_CALLS_CROSSED_NUM (a) != 0;
1338 ira_max_regno_call_before = max_reg_num ();
1339 if (flag_caller_saves && flag_ira_split_around_calls)
1341 if (split_around_calls ())
1343 ira_destroy ();
1344 max_regno = max_reg_num ();
1345 expand_reg_info (allocated_size);
1346 allocated_size = max_regno;
1347 for (i = ira_max_regno_call_before; i < max_regno; i++)
1348 reg_renumber [i] = -1;
1349 ira_build (FALSE);
1350 setup_allocno_assignment_from_reg_renumber ();
1351 reassign_conflict_allocnos ((flag_ira_assign_after_call_split
1352 ? ira_max_regno_call_before
1353 : max_reg_num ()), TRUE);
1354 setup_reg_renumber (FALSE, TRUE);
1358 calculate_allocation_cost ();
1360 #ifdef ENABLE_IRA_CHECKING
1361 check_allocation ();
1362 #endif
1364 setup_preferred_alternate_classes ();
1366 max_regno = max_reg_num ();
1367 delete_trivially_dead_insns (get_insns (), max_regno);
1368 max_regno = max_reg_num ();
1370 /* Determine if the current function is a leaf before running IRA
1371 since this can impact optimizations done by the prologue and
1372 epilogue thus changing register elimination offsets. */
1373 current_function_is_leaf = leaf_function_p ();
1375 /* Allocate and clear the reg_equiv_memory_loc array. */
1376 VEC_safe_grow (rtx, gc, reg_equiv_memory_loc_vec, max_regno);
1377 memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
1378 sizeof (rtx) * max_regno);
1379 reg_equiv_memory_loc = VEC_address (rtx, reg_equiv_memory_loc_vec);
1381 allocate_initial_values (reg_equiv_memory_loc);
1383 regstat_init_n_sets_and_refs ();
1384 regstat_compute_ri ();
1386 fix_reg_equiv_init ();
1388 #ifdef ENABLE_IRA_CHECKING
1389 print_redundant_copies ();
1390 #endif
1392 overall_cost_before = overall_cost;
1394 spilled_reg_stack_slots_num = 0;
1395 spilled_reg_stack_slots
1396 = ira_allocate (max_regno * sizeof (struct spilled_reg_stack_slot));
1398 df_set_flags (DF_NO_INSN_RESCAN);
1399 build_insn_chain (get_insns ());
1400 reload_completed = ! reload (get_insns (), 1);
1402 ira_free (spilled_reg_stack_slots);
1404 if (ira_dump_file != NULL && overall_cost_before != overall_cost)
1405 fprintf (ira_dump_file, "+++Overall after reload %d\n", overall_cost);
1407 ira_destroy ();
1409 cleanup_cfg (CLEANUP_EXPENSIVE);
1411 regstat_free_ri ();
1412 regstat_free_n_sets_and_refs ();
1414 ira_free (original_regno_call_crossed_p);
1415 ira_free (reg_equiv_invariant_p);
1416 ira_free (reg_equiv_const);
1418 bitmap_obstack_release (&ira_bitmap_obstack);
1419 #ifndef IRA_NO_OBSTACK
1420 obstack_free (&ira_obstack, NULL);
1421 #endif
1423 reload_completed = 1;
1425 /* The code after reload has changed so much that at this point we
1426 might as well just rescan everything. Note that
1427 df_rescan_all_insns is not going to help here because it does not
1428 touch the artificial uses and defs. */
1429 df_finish_pass (true);
1430 if (optimize > 1)
1431 df_live_add_problem ();
1432 df_scan_alloc (NULL);
1433 df_scan_blocks ();
1435 if (optimize)
1436 df_analyze ();
1441 static bool
1442 gate_ira (void)
1444 return flag_ira != 0;
1447 /* Run the integrated register allocator. */
1448 static unsigned int
1449 rest_of_handle_ira (void)
1451 ira (dump_file);
1452 return 0;
1455 struct tree_opt_pass pass_ira =
1457 "ira", /* name */
1458 gate_ira, /* gate */
1459 rest_of_handle_ira, /* execute */
1460 NULL, /* sub */
1461 NULL, /* next */
1462 0, /* static_pass_number */
1463 TV_IRA, /* tv_id */
1464 0, /* properties_required */
1465 0, /* properties_provided */
1466 0, /* properties_destroyed */
1467 0, /* todo_flags_start */
1468 TODO_dump_func |
1469 TODO_ggc_collect, /* todo_flags_finish */
1470 'y' /* letter */