/* Integrated Register Allocator (IRA) entry point.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The integrated register allocator (IRA) is a
   regional register allocator performing graph coloring on a top-down
   traversal of nested regions.  Graph coloring in a region is based
   on the Chaitin-Briggs algorithm.  It is called integrated because
   register coalescing, register live range splitting, and choosing a
   better hard register are done on-the-fly during coloring.  Register
   coalescing and choosing a cheaper hard register are done by hard
   register preferencing during hard register assignment.  Live
   range splitting is a byproduct of the regional register allocation.

   Major IRA notions are:

     o *Region* is a part of the CFG where graph coloring based on
       the Chaitin-Briggs algorithm is done.  IRA can work on any set
       of nested CFG regions forming a tree.  Currently the regions
       are the entire function for the root region and natural loops
       for the other regions.  Therefore the data structure
       representing a region is called loop_tree_node.

     o *Allocno class* is a register class used for the allocation of
       a given allocno.  It means that only hard registers of the
       given register class can be assigned to the allocno.  In
       reality, an even smaller subset of (*profitable*) hard
       registers can be assigned.  In rare cases, the subset can be
       even smaller because our modification of the Chaitin-Briggs
       algorithm requires that the sets of hard registers assignable
       to allocnos form a forest, i.e. the sets can be ordered in a
       way where any earlier set either does not intersect a given
       set or is a superset of it.

     o *Pressure class* is a register class belonging to a set of
       register classes containing all of the hard-registers available
       for register allocation.  The set of all pressure classes for a
       target is defined in the corresponding machine-description file
       according to some criteria.  Register pressure is calculated
       only for pressure classes and it affects some IRA decisions,
       such as forming allocation regions.

     o *Allocno* represents the live range of a pseudo-register in a
       region.  Besides the obvious attributes like the corresponding
       pseudo-register number, allocno class, conflicting allocnos and
       conflicting hard-registers, there are a few allocno attributes
       which are important for understanding the allocation algorithm:

       - *Live ranges*.  This is a list of ranges of *program points*
         where the allocno lives.  Program points represent places
         where a pseudo can be born or become dead (there are
         approximately two times more program points than insns) and
         they are represented by integers starting with 0.  The live
         ranges are used to find conflicts between allocnos.  They
         also play a very important role in the transformation of the
         IRA internal representation of several regions into a
         one-region representation.  The latter is used during the
         reload pass because each allocno then represents all of the
         corresponding pseudo-registers.

       - *Hard-register costs*.  This is a vector of size equal to
         the number of available hard-registers of the allocno class.
         The cost of a callee-clobbered hard-register for an allocno
         is increased by the cost of save/restore code around the
         calls through the given allocno's life.  If the allocno is a
         move instruction operand and another operand is a
         hard-register of the allocno class, the cost of the
         hard-register is decreased by the move cost.

         When an allocno is assigned, the hard-register with minimal
         full cost is used.  Initially, a hard-register's full cost
         is the corresponding value from the hard-register's cost
         vector.  If the allocno is connected by a *copy* (see below)
         to another allocno which has just received a hard-register,
         the cost of the hard-register is decreased.  Before choosing
         a hard-register for an allocno, the allocno's current costs
         of the hard-registers are modified by the conflict
         hard-register costs of all of the conflicting allocnos which
         are not assigned yet.  (A sketch of the resulting full cost
         appears right after this list.)

       - *Conflict hard-register costs*.  This is a vector of the
         same size as the hard-register costs vector.  To permit an
         unassigned allocno to get a better hard-register, IRA uses
         this vector to calculate the final full cost of the
         available hard-registers.  Conflict hard-register costs of
         an unassigned allocno are also changed with a change of the
         hard-register cost of the allocno when a copy involving the
         allocno is processed as described above.  This is done to
         show other unassigned allocnos that a given allocno prefers
         some hard-registers in order to remove the move instruction
         corresponding to the copy.

     o *Cap*.  If a pseudo-register does not live in a region but
       lives in a nested region, IRA creates a special allocno called
       a cap in the outer region.  A region cap is also created for a
       subregion cap.

     o *Copy*.  Allocnos can be connected by copies.  Copies are used
       to modify hard-register costs for allocnos during coloring.
       Such modifications reflect a preference to use the same
       hard-register for the allocnos connected by copies.  Usually
       copies are created for move insns (in this case it results in
       register coalescing).  But IRA also creates copies for operands
       of an insn which should be assigned to the same hard-register
       due to constraints in the machine description (it usually
       results in removing a move generated in reload to satisfy
       the constraints) and copies referring to the allocno which is
       the output operand of an instruction and the allocno which is
       an input operand dying in the instruction (creation of such
       copies results in less register shuffling).  IRA *does not*
       create copies between allocnos of the same register from
       different regions because we use another technique for
       propagating hard-register preferences on the borders of
       regions.
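
   As a rough sketch of how the cost attributes above combine (the
   names below are illustrative only, not IRA's actual code), the
   full cost used when choosing a hard register for an allocno A is:

     full_cost (A, hard_regno)
       = hard_reg_cost (A, hard_regno)           [cost vector entry]
       - decrease_from_copies (A, hard_regno)
       + conflict_costs_of_unassigned_conflicting_allocnos
	   (A, hard_regno)

   and the hard register with the minimal full cost among the
   profitable hard registers of A's allocno class is chosen.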

   Allocnos (including caps) for an upper region in the region tree
   *accumulate* information important for coloring from allocnos with
   the same pseudo-register from nested regions.  This includes
   hard-register and memory costs, conflicts with hard-registers,
   allocno conflicts, allocno copies and more.  *Thus, attributes for
   allocnos in a region have the same values as if the region had no
   subregions*.  It means that attributes for allocnos in the
   outermost region corresponding to the function have the same values
   as though the allocation used only one region which is the entire
   function.  It also means that we can look at IRA's work as if IRA
   first did the allocation for the whole function, then improved it
   for loops, then for their subloops, and so on; the accumulation is
   sketched below.
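
   As an illustration only (the actual propagation is implemented in
   ira-build.c), accumulating the attributes of a subregion allocno
   SUBLOOP_A into the corresponding allocno A of the enclosing region
   looks roughly like:

     for each hard register HR of the allocno class:
       hard_reg_cost (A, HR) += hard_reg_cost (SUBLOOP_A, HR);
     memory_cost (A) += memory_cost (SUBLOOP_A);
     conflict_hard_regs (A) |= conflict_hard_regs (SUBLOOP_A);
     ... likewise for allocno conflicts, copies, etc. ...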

   IRA's major passes are:

     o Building the IRA internal representation, which consists of the
       following subpasses:

       * First, IRA builds regions and creates allocnos (file
         ira-build.c) and initializes most of their attributes.

       * Then IRA finds an allocno class for each allocno and
         calculates its initial (non-accumulated) cost of memory and
         of each hard-register of its allocno class (file
         ira-costs.c).

       * IRA creates live ranges of each allocno, calculates register
         pressure for each pressure class in each region, sets up
         conflict hard registers for each allocno and info about calls
         the allocno lives through (file ira-lives.c).

       * IRA removes low register pressure loops from the regions,
         mostly to speed IRA up (file ira-build.c).

       * IRA propagates accumulated allocno info from lower region
         allocnos to corresponding upper region allocnos (file
         ira-build.c).

       * IRA creates all caps (file ira-build.c).

       * Having live-ranges of allocnos and their classes, IRA creates
         conflicting allocnos for each allocno.  Conflicting allocnos
         are stored as a bit vector or an array of pointers to the
         conflicting allocnos, whichever is more profitable (file
         ira-conflicts.c).  At this point IRA creates allocno copies.

     o Coloring.  Now IRA has all the necessary info to start the
       graph coloring process.  It is done in each region in a
       top-down traversal of the region tree (file ira-color.c).
       There are the following subpasses:

       * Finding profitable hard registers of the corresponding
         allocno class for each allocno.  For example, only
         callee-saved hard registers are frequently profitable for
         allocnos living through calls.  If the profitable hard
         register set of an allocno does not form a tree based on the
         subset relation, we use some approximation to form the tree.
         This approximation is used to figure out the trivial
         colorability of allocnos.  The approximation is needed only
         in pretty rare cases.

       * Putting allocnos onto the coloring stack.  IRA uses Briggs
         optimistic coloring which is a major improvement over
         Chaitin's coloring.  Therefore IRA does not spill allocnos at
         this point.  There is some freedom in the order of putting
         allocnos on the stack which can affect the final result of
         the allocation.  IRA uses some heuristics to improve the
         order.

         We also use a modification of the Chaitin-Briggs algorithm
         which works for intersected register classes of allocnos.
         To figure out the trivial colorability of allocnos, the
         above-mentioned tree of hard register sets is used (a sketch
         of this test appears after this list of passes).  To get an
         idea how the algorithm works on i386, for example, consider
         an allocno to which any general hard register can be
         assigned.  If the allocno conflicts with eight allocnos to
         which only the EAX register can be assigned, the given
         allocno is still trivially colorable because all conflicting
         allocnos might be assigned only to EAX and all other general
         hard registers are still free.

         To get an idea of the trivial colorability criterion used,
         it is also useful to read the article "Graph-Coloring
         Register Allocation for Irregular Architectures" by Michael
         D. Smith and Glenn Holloway.  The major difference between
         the article's approach and the approach used in IRA is that
         Smith's approach takes register classes only from the
         machine description while IRA calculates register classes
         from the intermediate code too (e.g. an explicit usage of
         hard registers in RTL code for parameter passing can result
         in the creation of additional register classes which contain
         or exclude the hard registers).  That makes the IRA approach
         useful for improving coloring even for architectures with
         regular register files, and in fact some benchmarking shows
         the improvement for regular class architectures is even
         bigger than for irregular ones.  Another difference is that
         Smith's approach chooses the intersection of classes of all
         insn operands in which a given pseudo occurs.  IRA can use
         bigger classes if that is still more profitable than memory
         usage.

       * Popping the allocnos from the stack and assigning them hard
         registers.  If IRA cannot assign a hard register to an
         allocno and the allocno is coalesced, IRA undoes the
         coalescing and puts the uncoalesced allocnos onto the stack
         in the hope that some such allocnos will get a hard register
         separately.  If IRA fails to assign a hard register or
         memory is more profitable for it, IRA spills the allocno.
         IRA assigns the allocno the hard-register with minimal full
         allocation cost which reflects the cost of usage of the
         hard-register for the allocno and the cost of usage of the
         hard-register for allocnos conflicting with the given
         allocno.

       * Chaitin-Briggs coloring assigns as many pseudos as possible
         to hard registers.  After coloring we try to improve the
         allocation from the cost point of view.  We improve the
         allocation by spilling some allocnos and assigning the freed
         hard registers to other allocnos if it decreases the overall
         allocation cost.

       * After allocno assignment in the region, IRA modifies the
         hard register and memory costs for the corresponding
         allocnos in the subregions to reflect the cost of possible
         loads, stores, or moves on the border of the region and its
         subregions.  When the default regional allocation algorithm
         is used (-fira-algorithm=mixed), IRA just propagates the
         assignment for allocnos if the register pressure in the
         region for the corresponding pressure class is less than the
         number of available hard registers for the given pressure
         class.

     o Spill/restore code moving.  When IRA performs an allocation
       by traversing regions in top-down order, it does not know what
       happens below in the region tree.  Therefore, sometimes IRA
       misses opportunities to perform a better allocation.  A simple
       optimization tries to improve allocation in a region having
       subregions and contained in another region.  If the
       corresponding allocnos in the subregion are spilled, it spills
       the region allocno if it is profitable.  The optimization
       implements a simple iterative algorithm performing profitable
       transformations while they are still possible.  It is fast in
       practice, so there is no real need for a better time complexity
       algorithm.

     o Code change.  After coloring, two allocnos representing the
       same pseudo-register outside and inside a region respectively
       may be assigned to different locations (hard-registers or
       memory).  In this case IRA creates and uses a new
       pseudo-register inside the region and adds code to move allocno
       values on the region's borders.  This is done during a top-down
       traversal of the regions (file ira-emit.c).  In some
       complicated cases IRA can create a new allocno to move allocno
       values (e.g. when a swap of values stored in two hard-registers
       is needed).  At this stage, the new allocno is marked as
       spilled.  IRA still creates the pseudo-register and the moves
       on the region borders even when both allocnos were assigned to
       the same hard-register.  If the reload pass spills a
       pseudo-register for some reason, the effect will be smaller
       because another allocno will still be in the hard-register.  In
       most cases, this is better than spilling both allocnos.  If
       reload does not change the allocation for the two
       pseudo-registers, the trivial move will be removed by
       post-reload optimizations.  IRA does not generate moves for
       allocnos assigned to the same hard register when the default
       regional allocation algorithm is used and the register pressure
       in the region for the corresponding pressure class is less than
       the number of available hard registers for the given pressure
       class.  IRA also does some optimizations to remove redundant
       stores and to reduce code duplication on the region borders.

     o Flattening internal representation.  After changing code, IRA
       transforms its internal representation for several regions into
       a one-region representation (file ira-build.c).  This process
       is called IR flattening.  Such a process is more complicated
       than IR rebuilding would be, but is much faster.

     o After IR flattening, IRA tries to assign hard registers to all
       spilled allocnos.  This is implemented by a simple and fast
       priority coloring algorithm (see function
       ira_reassign_conflict_allocnos::ira-color.c).  Here new
       allocnos created during the code change pass can be assigned
       to hard registers.

     o At the end IRA calls the reload pass.  The reload pass
       communicates with IRA through several functions in file
       ira-color.c to improve its decisions in

       * sharing stack slots for the spilled pseudos based on IRA info
         about pseudo-register conflicts.

       * reassigning hard-registers to all spilled pseudos at the end
         of each reload iteration.

       * choosing a better hard-register to spill based on IRA info
         about pseudo-register live ranges and the register pressure
         in places where the pseudo-register lives.
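
   As a sketch only (the real test lives in ira-color.c and uses the
   forest of hard register sets described above), the trivial
   colorability test for an allocno A can be thought of as computing

     left (A) = available_hard_regs (A)
		- sum over each set S in the forest below A's set of
		  min (|S|, total registers needed by allocnos
			    conflicting with A and confined to S)

   and declaring A trivially colorable when left (A) >= regs needed
   by A.  In the EAX example above, the eight conflicting allocnos
   are all confined to the one-register set {EAX}, so they contribute
   min (1, 8) = 1, leaving seven general registers for A.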

   IRA uses a lot of data representing the target processors.  These
   data are initialized in file ira.c.

   If the function has no loops (or the loops are ignored when
   -fira-algorithm=CB is used), we have classic Chaitin-Briggs
   coloring (only instead of a separate pass of coalescing, we use
   hard register preferencing).  In such a case, IRA works much
   faster because many things are not done (like IR flattening, the
   spill/restore optimization, and the code change).

   Literature worth reading for a better understanding of the code:

     o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements
       to Graph Coloring Register Allocation.

     o David Callahan, Brian Koblenz.  Register allocation via
       hierarchical graph coloring.

     o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt.  Revisiting
       Graph Coloring Register Allocation: A Study of the
       Chaitin-Briggs and Callahan-Koblenz Algorithms.

     o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai.
       Global Register Allocation Based on Graph Fusion.

     o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
       Allocation for Irregular Architectures.

     o Vladimir Makarov.  The Integrated Register Allocator for GCC.

     o Vladimir Makarov.  The top-down register allocator for
       irregular register file architectures.

*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "regs.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "flags.h"
#include "obstack.h"
#include "bitmap.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "expr.h"
#include "recog.h"
#include "params.h"
#include "timevar.h"
#include "tree-pass.h"
#include "output.h"
#include "except.h"
#include "reload.h"
#include "diagnostic-core.h"
#include "integrate.h"
#include "ggc.h"
#include "ira-int.h"

struct target_ira default_target_ira;
struct target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
struct target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif

/* A modified value of flag `-fira-verbose' used internally.  */
int internal_flag_ira_verbose;

/* Dump file of the allocator if it is not NULL.  */
FILE *ira_dump_file;

/* The number of elements in the following array.  */
int ira_spilled_reg_stack_slots_num;

/* The following array contains info about spilled pseudo-registers
   stack slots used in the current function so far.  */
struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;

/* Correspondingly: overall cost of the allocation, cost of the
   allocnos assigned to hard-registers, cost of the allocnos assigned
   to memory, cost of loads, stores and register move insns generated
   for pseudo-register live range splitting (see ira-emit.c).  */
int ira_overall_cost;
int ira_reg_cost, ira_mem_cost;
int ira_load_cost, ira_store_cost, ira_shuffle_cost;
int ira_move_loops_num, ira_additional_jumps_num;

/* All registers that can be eliminated.  */
HARD_REG_SET eliminable_regset;

/* Temporary hard reg set used for a different calculation.  */
static HARD_REG_SET temp_hard_regset;

/* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
static void
setup_reg_mode_hard_regset (void)
{
  int i, m, hard_regno;

  for (m = 0; m < NUM_MACHINE_MODES; m++)
    for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
      {
	CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
	for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
	  if (hard_regno + i < FIRST_PSEUDO_REGISTER)
	    SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
			      hard_regno + i);
      }
}


#define no_unit_alloc_regs \
  (this_target_ira_int->x_no_unit_alloc_regs)

/* The function sets up the three arrays declared above.  */
static void
setup_class_hard_regs (void)
{
  int cl, i, hard_regno, n;
  HARD_REG_SET processed_hard_reg_set;

  ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      CLEAR_HARD_REG_SET (processed_hard_reg_set);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  ira_non_ordered_class_hard_regs[cl][i] = -1;
	  ira_class_hard_reg_index[cl][i] = -1;
	}
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
#ifdef REG_ALLOC_ORDER
	  hard_regno = reg_alloc_order[i];
#else
	  hard_regno = i;
#endif
	  if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
	    continue;
	  SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
	  if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
	    ira_class_hard_reg_index[cl][hard_regno] = -1;
	  else
	    {
	      ira_class_hard_reg_index[cl][hard_regno] = n;
	      ira_class_hard_regs[cl][n++] = hard_regno;
	    }
	}
      ira_class_hard_regs_num[cl] = n;
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (temp_hard_regset, i))
	  ira_non_ordered_class_hard_regs[cl][n++] = i;
      ira_assert (ira_class_hard_regs_num[cl] == n);
    }
}

/* Set up IRA_AVAILABLE_CLASS_REGS.  */
static void
setup_available_class_regs (void)
{
  int i, j;

  memset (ira_available_class_regs, 0, sizeof (ira_available_class_regs));
  for (i = 0; i < N_REG_CLASSES; i++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
	if (TEST_HARD_REG_BIT (temp_hard_regset, j))
	  ira_available_class_regs[i]++;
    }
}

/* Set up global variables defining info about hard registers for the
   allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
   that we can use the hard frame pointer for the allocation.  */
static void
setup_alloc_regs (bool use_hard_frame_p)
{
#ifdef ADJUST_REG_ALLOC_ORDER
  ADJUST_REG_ALLOC_ORDER;
#endif
  COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_reg_set);
  if (! use_hard_frame_p)
    SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
  setup_class_hard_regs ();
  setup_available_class_regs ();
}


#define alloc_reg_class_subclasses \
  (this_target_ira_int->x_alloc_reg_class_subclasses)

/* Initialize the table of subclasses of each reg class.  */
static void
setup_reg_subclasses (void)
{
  int i, j;
  HARD_REG_SET temp_hard_regset2;

  for (i = 0; i < N_REG_CLASSES; i++)
    for (j = 0; j < N_REG_CLASSES; j++)
      alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      if (i == (int) NO_REGS)
	continue;

      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      if (hard_reg_set_empty_p (temp_hard_regset))
	continue;
      for (j = 0; j < N_REG_CLASSES; j++)
	if (i != j)
	  {
	    enum reg_class *p;

	    COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
	    AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	    if (! hard_reg_set_subset_p (temp_hard_regset,
					 temp_hard_regset2))
	      continue;
	    p = &alloc_reg_class_subclasses[j][0];
	    while (*p != LIM_REG_CLASSES) p++;
	    *p = (enum reg_class) i;
	  }
    }
}


/* Set up IRA_MEMORY_MOVE_COST and IRA_MAX_MEMORY_MOVE_COST.  */
static void
setup_class_subset_and_memory_move_costs (void)
{
  int cl, cl2, mode, cost;
  HARD_REG_SET temp_hard_regset2;

  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    ira_memory_move_cost[mode][NO_REGS][0]
      = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      if (cl != (int) NO_REGS)
	for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
	  {
	    ira_max_memory_move_cost[mode][cl][0]
	      = ira_memory_move_cost[mode][cl][0]
	      = memory_move_cost ((enum machine_mode) mode,
				  (reg_class_t) cl, false);
	    ira_max_memory_move_cost[mode][cl][1]
	      = ira_memory_move_cost[mode][cl][1]
	      = memory_move_cost ((enum machine_mode) mode,
				  (reg_class_t) cl, true);
	    /* Costs for NO_REGS are used in cost calculation on the
	       1st pass when the preferred register classes are not
	       known yet.  In this case we take the best scenario.  */
	    if (ira_memory_move_cost[mode][NO_REGS][0]
		> ira_memory_move_cost[mode][cl][0])
	      ira_max_memory_move_cost[mode][NO_REGS][0]
		= ira_memory_move_cost[mode][NO_REGS][0]
		= ira_memory_move_cost[mode][cl][0];
	    if (ira_memory_move_cost[mode][NO_REGS][1]
		> ira_memory_move_cost[mode][cl][1])
	      ira_max_memory_move_cost[mode][NO_REGS][1]
		= ira_memory_move_cost[mode][NO_REGS][1]
		= ira_memory_move_cost[mode][cl][1];
	  }
    }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
      {
	COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
	AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	ira_class_subset_p[cl][cl2]
	  = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
	if (! hard_reg_set_empty_p (temp_hard_regset2)
	    && hard_reg_set_subset_p (reg_class_contents[cl2],
				      reg_class_contents[cl]))
	  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
	    {
	      cost = ira_memory_move_cost[mode][cl2][0];
	      if (cost > ira_max_memory_move_cost[mode][cl][0])
		ira_max_memory_move_cost[mode][cl][0] = cost;
	      cost = ira_memory_move_cost[mode][cl2][1];
	      if (cost > ira_max_memory_move_cost[mode][cl][1])
		ira_max_memory_move_cost[mode][cl][1] = cost;
	    }
      }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
      {
	ira_memory_move_cost[mode][cl][0]
	  = ira_max_memory_move_cost[mode][cl][0];
	ira_memory_move_cost[mode][cl][1]
	  = ira_max_memory_move_cost[mode][cl][1];
      }
  setup_reg_subclasses ();
}


/* Define the following macro if allocation through malloc is
   preferable.  */
#define IRA_NO_OBSTACK

#ifndef IRA_NO_OBSTACK
/* Obstack used for storing all dynamic data (except bitmaps) of the
   IRA.  */
static struct obstack ira_obstack;
#endif

/* Obstack used for storing all bitmaps of the IRA.  */
static struct bitmap_obstack ira_bitmap_obstack;

/* Allocate memory of size LEN for IRA data.  */
void *
ira_allocate (size_t len)
{
  void *res;

#ifndef IRA_NO_OBSTACK
  res = obstack_alloc (&ira_obstack, len);
#else
  res = xmalloc (len);
#endif
  return res;
}

/* Free memory ADDR allocated for IRA data.  */
void
ira_free (void *addr ATTRIBUTE_UNUSED)
{
#ifndef IRA_NO_OBSTACK
  /* do nothing */
#else
  free (addr);
#endif
}


/* Allocate and return a bitmap for IRA.  */
bitmap
ira_allocate_bitmap (void)
{
  return BITMAP_ALLOC (&ira_bitmap_obstack);
}

/* Free bitmap B allocated for IRA.  */
void
ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
{
  /* do nothing */
}
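
#if 0
/* A hypothetical usage sketch of the allocation wrappers above; N
   and the surrounding pass code are assumed, not taken from this
   file.  With IRA_NO_OBSTACK defined (as above), ira_allocate and
   ira_free are thin wrappers around xmalloc/free; otherwise the data
   lives on ira_obstack and ira_free is a no-op.  */
static void
ira_alloc_usage_sketch (size_t n)
{
  int *costs = (int *) ira_allocate (n * sizeof (int));
  bitmap live = ira_allocate_bitmap ();

  /* ... fill COSTS and LIVE during some IRA subpass ...  */

  ira_free_bitmap (live);
  ira_free (costs);
}
#endif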


/* Output information about the allocation of all allocnos (except for
   caps) into file F.  */
void
ira_print_disposition (FILE *f)
{
  int i, n, max_regno;
  ira_allocno_t a;
  basic_block bb;

  fprintf (f, "Disposition:");
  max_regno = max_reg_num ();
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	if (n % 4 == 0)
	  fprintf (f, "\n");
	n++;
	fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
	  fprintf (f, "b%-3d", bb->index);
	else
	  fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
	if (ALLOCNO_HARD_REGNO (a) >= 0)
	  fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
	else
	  fprintf (f, " mem");
      }
  fprintf (f, "\n");
}

/* Output information about the allocation of all allocnos into
   stderr.  */
void
ira_debug_disposition (void)
{
  ira_print_disposition (stderr);
}


/* Set up ira_stack_reg_pressure_class which is the biggest pressure
   register class containing stack registers or NO_REGS if there are
   no stack registers.  To find this class, we iterate through all
   register pressure classes and choose the first register pressure
   class containing all the stack registers and having the biggest
   size.  */
static void
setup_stack_reg_pressure_class (void)
{
  ira_stack_reg_pressure_class = NO_REGS;
#ifdef STACK_REGS
  {
    int i, best, size;
    enum reg_class cl;
    HARD_REG_SET temp_hard_regset2;

    CLEAR_HARD_REG_SET (temp_hard_regset);
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      SET_HARD_REG_BIT (temp_hard_regset, i);
    best = 0;
    for (i = 0; i < ira_pressure_classes_num; i++)
      {
	cl = ira_pressure_classes[i];
	COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
	AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	size = hard_reg_set_size (temp_hard_regset2);
	if (best < size)
	  {
	    best = size;
	    ira_stack_reg_pressure_class = cl;
	  }
      }
  }
#endif
}

/* Find pressure classes, which are register classes for which we
   calculate register pressure in IRA, in register pressure sensitive
   insn scheduling, and in register pressure sensitive loop invariant
   motion.

   To make register pressure calculation easy, we always use
   non-intersecting register pressure classes.  A move of hard
   registers within one register pressure class is not more expensive
   than a load and store of those hard registers.  Most likely an
   allocno class will be a subset of a register pressure class and in
   many cases will coincide with a register pressure class.  That
   makes the usage of register pressure classes a good approximation
   for finding high register pressure.  */
static void
setup_pressure_classes (void)
{
  int cost, i, n, curr;
  int cl, cl2;
  enum reg_class pressure_classes[N_REG_CLASSES];
  int m;
  HARD_REG_SET temp_hard_regset2;
  bool insert_p;

  n = 0;
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      if (ira_available_class_regs[cl] == 0)
	continue;
      if (ira_available_class_regs[cl] != 1
	  /* A register class without subclasses may contain a few
	     hard registers and movement between them is costly
	     (e.g. SPARC FPCC registers).  We still should consider it
	     as a candidate for a pressure class.  */
	  && alloc_reg_class_subclasses[cl][0] != LIM_REG_CLASSES)
	{
	  /* Check that the moves between any hard registers of the
	     current class are not more expensive for a legal mode
	     than load/store of the hard registers of the current
	     class.  Such a class is a potential candidate to be a
	     register pressure class.  */
	  for (m = 0; m < NUM_MACHINE_MODES; m++)
	    {
	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset,
				      ira_prohibited_class_mode_regs[cl][m]);
	      if (hard_reg_set_empty_p (temp_hard_regset))
		continue;
	      ira_init_register_move_cost_if_necessary ((enum machine_mode) m);
	      cost = ira_register_move_cost[m][cl][cl];
	      if (cost <= ira_max_memory_move_cost[m][cl][1]
		  || cost <= ira_max_memory_move_cost[m][cl][0])
		break;
	    }
	  if (m >= NUM_MACHINE_MODES)
	    continue;
	}
      curr = 0;
      insert_p = true;
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      /* Remove so far added pressure classes which are subsets of the
	 current candidate class.  Prefer GENERAL_REGS as a pressure
	 register class to another class containing the same
	 allocatable hard registers.  We do this because machine
	 dependent cost hooks might give wrong costs for the latter
	 class but always give the right cost for the former class
	 (GENERAL_REGS).  */
      for (i = 0; i < n; i++)
	{
	  cl2 = pressure_classes[i];
	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	  if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
	      && (! hard_reg_set_equal_p (temp_hard_regset, temp_hard_regset2)
		  || cl2 == (int) GENERAL_REGS))
	    {
	      pressure_classes[curr++] = (enum reg_class) cl2;
	      insert_p = false;
	      continue;
	    }
	  if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
	      && (! hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset)
		  || cl == (int) GENERAL_REGS))
	    continue;
	  if (hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset))
	    insert_p = false;
	  pressure_classes[curr++] = (enum reg_class) cl2;
	}
      /* If the current candidate is a subset of a so far added
	 pressure class, don't add it to the list of the pressure
	 classes.  */
      if (insert_p)
	pressure_classes[curr++] = (enum reg_class) cl;
      n = curr;
    }
#ifdef ENABLE_IRA_CHECKING
  {
    HARD_REG_SET ignore_hard_regs;

    /* Check pressure classes correctness: here we check that hard
       registers from all register pressure classes contain all hard
       registers available for the allocation.  */
    CLEAR_HARD_REG_SET (temp_hard_regset);
    CLEAR_HARD_REG_SET (temp_hard_regset2);
    COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
    for (cl = 0; cl < LIM_REG_CLASSES; cl++)
      {
	/* For some targets (like MIPS with MD_REGS), there are some
	   classes with hard registers available for allocation but
	   not able to hold a value of any mode.  */
	for (m = 0; m < NUM_MACHINE_MODES; m++)
	  if (contains_reg_of_mode[cl][m])
	    break;
	if (m >= NUM_MACHINE_MODES)
	  {
	    IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
	    continue;
	  }
	for (i = 0; i < n; i++)
	  if ((int) pressure_classes[i] == cl)
	    break;
	IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	if (i < n)
	  IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      }
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      /* Some targets (like SPARC with ICC reg) have allocatable regs
	 for which no reg class is defined.  */
      if (REGNO_REG_CLASS (i) == NO_REGS)
	SET_HARD_REG_BIT (ignore_hard_regs, i);
    AND_COMPL_HARD_REG_SET (temp_hard_regset, ignore_hard_regs);
    AND_COMPL_HARD_REG_SET (temp_hard_regset2, ignore_hard_regs);
    ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
  }
#endif
  ira_pressure_classes_num = 0;
  for (i = 0; i < n; i++)
    {
      cl = (int) pressure_classes[i];
      ira_reg_pressure_class_p[cl] = true;
      ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
    }
  setup_stack_reg_pressure_class ();
}


/* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
   IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.

   A target may have many subtargets and not all target hard
   registers can be used for allocation, e.g. the x86 port in 32-bit
   mode cannot use the hard registers introduced in x86-64 like
   r8-r15.  Some classes might have the same allocatable hard
   registers, e.g. INDEX_REGS and GENERAL_REGS in the x86 port in
   32-bit mode.  To decrease the effort of different calculations, we
   introduce allocno classes which contain unique non-empty sets of
   allocatable hard-registers.

   Pseudo class cost calculation in ira-costs.c is very expensive.
   Therefore we try to decrease the number of classes involved in
   such calculation.  Register classes used in the cost calculation
   are called important classes.  They are allocno classes and other
   non-empty classes whose allocatable hard register sets are inside
   an allocno class hard register set.  At first sight, it looks as
   though they are just allocno classes.  This is not true.  In the
   example of the x86 port in 32-bit mode, allocno classes will
   contain GENERAL_REGS but not LEGACY_REGS (because the allocatable
   hard registers are the same for both classes).  The important
   classes will contain GENERAL_REGS and LEGACY_REGS.  This is done
   because a machine description insn constraint may refer to
   LEGACY_REGS and the code in ira-costs.c is mostly based on
   investigation of the insn constraints.  */
static void
setup_allocno_and_important_classes (void)
{
  int i, j, n, cl;
  bool set_p;
  HARD_REG_SET temp_hard_regset2;
  static enum reg_class classes[LIM_REG_CLASSES + 1];

  n = 0;
  /* Collect classes which contain unique sets of allocatable hard
     registers.  Prefer GENERAL_REGS to other classes containing the
     same set of hard registers.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      for (j = 0; j < n; j++)
	{
	  cl = classes[j];
	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset2,
				  no_unit_alloc_regs);
	  if (hard_reg_set_equal_p (temp_hard_regset,
				    temp_hard_regset2))
	    break;
	}
      if (j >= n)
	classes[n++] = (enum reg_class) i;
      else if (i == GENERAL_REGS)
	/* Prefer general regs.  For the i386 example, it means that
	   we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
	   (all of them consist of the same available hard
	   registers).  */
	classes[j] = (enum reg_class) i;
    }
  classes[n] = LIM_REG_CLASSES;

  /* Set up classes which can be used for allocnos as classes
     containing non-empty unique sets of allocatable hard
     registers.  */
  ira_allocno_classes_num = 0;
  for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      if (hard_reg_set_empty_p (temp_hard_regset))
	continue;
      ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
    }
  ira_important_classes_num = 0;
  /* Add non-allocno classes containing a non-empty set of
     allocatable hard regs.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      if (! hard_reg_set_empty_p (temp_hard_regset))
	{
	  set_p = false;
	  for (j = 0; j < ira_allocno_classes_num; j++)
	    {
	      COPY_HARD_REG_SET (temp_hard_regset2,
				 reg_class_contents[ira_allocno_classes[j]]);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	      if ((enum reg_class) cl == ira_allocno_classes[j])
		break;
	      else if (hard_reg_set_subset_p (temp_hard_regset,
					      temp_hard_regset2))
		set_p = true;
	    }
	  if (set_p && j >= ira_allocno_classes_num)
	    ira_important_classes[ira_important_classes_num++]
	      = (enum reg_class) cl;
	}
    }
  /* Now add allocno classes to the important classes.  */
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_important_classes[ira_important_classes_num++]
      = ira_allocno_classes[j];
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      ira_reg_allocno_class_p[cl] = false;
      ira_reg_pressure_class_p[cl] = false;
    }
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
  setup_pressure_classes ();
}

/* Set up translation in CLASS_TRANSLATE of all classes into a class
   given by array CLASSES of length CLASSES_NUM.  The function is
   used to translate any reg class to an allocno class or to a
   pressure class.  This translation is necessary for some
   calculations when we can use only allocno or pressure classes and
   such translation represents an approximate representation of all
   classes.

   The translation in the case when the allocatable hard register set
   of a given class is a subset of the allocatable hard register set
   of a class in CLASSES is pretty simple.  We use the smallest class
   from CLASSES containing the given class.  If the allocatable hard
   register set of a given class is not a subset of any corresponding
   set of a class from CLASSES, we use the cheapest (from the
   load/store point of view) class from CLASSES whose set intersects
   with the given class set.  */
static void
setup_class_translate_array (enum reg_class *class_translate,
			     int classes_num, enum reg_class *classes)
{
  int cl, mode;
  enum reg_class aclass, best_class, *cl_ptr;
  int i, cost, min_cost, best_cost;

  for (cl = 0; cl < N_REG_CLASSES; cl++)
    class_translate[cl] = NO_REGS;

  for (i = 0; i < classes_num; i++)
    {
      aclass = classes[i];
      for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
	   (cl = *cl_ptr) != LIM_REG_CLASSES;
	   cl_ptr++)
	if (class_translate[cl] == NO_REGS)
	  class_translate[cl] = aclass;
      class_translate[aclass] = aclass;
    }
  /* For classes which are not fully covered by one of the given
     classes (in other words covered by more than one given class),
     use the cheapest class.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      if (cl == NO_REGS || class_translate[cl] != NO_REGS)
	continue;
      best_class = NO_REGS;
      best_cost = INT_MAX;
      for (i = 0; i < classes_num; i++)
	{
	  aclass = classes[i];
	  COPY_HARD_REG_SET (temp_hard_regset,
			     reg_class_contents[aclass]);
	  AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	  if (! hard_reg_set_empty_p (temp_hard_regset))
	    {
	      min_cost = INT_MAX;
	      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
		{
		  cost = (ira_memory_move_cost[mode][cl][0]
			  + ira_memory_move_cost[mode][cl][1]);
		  if (min_cost > cost)
		    min_cost = cost;
		}
	      if (best_class == NO_REGS || best_cost > min_cost)
		{
		  best_class = aclass;
		  best_cost = min_cost;
		}
	    }
	}
      class_translate[cl] = best_class;
    }
}

/* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and
   IRA_PRESSURE_CLASS_TRANSLATE.  */
static void
setup_class_translate (void)
{
  setup_class_translate_array (ira_allocno_class_translate,
			       ira_allocno_classes_num, ira_allocno_classes);
  setup_class_translate_array (ira_pressure_class_translate,
			       ira_pressure_classes_num, ira_pressure_classes);
}

/* Order numbers of allocno classes in the original target allocno
   class array, -1 for non-allocno classes.  */
static int allocno_class_order[N_REG_CLASSES];

/* The function used to sort the important classes.  */
static int
comp_reg_classes_func (const void *v1p, const void *v2p)
{
  enum reg_class cl1 = *(const enum reg_class *) v1p;
  enum reg_class cl2 = *(const enum reg_class *) v2p;
  enum reg_class tcl1, tcl2;
  int diff;

  tcl1 = ira_allocno_class_translate[cl1];
  tcl2 = ira_allocno_class_translate[cl2];
  if (tcl1 != NO_REGS && tcl2 != NO_REGS
      && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
    return diff;
  return (int) cl1 - (int) cl2;
}

/* For the correct work of function setup_reg_class_relations we need
   to reorder important classes according to the order of their
   allocno classes.  It places important classes containing the same
   allocatable hard register set adjacent to each other, and the
   allocno class with a given allocatable hard register set right
   after the other important classes with the same set.

   In the example from the comments of function
   setup_allocno_and_important_classes, it places LEGACY_REGS and
   GENERAL_REGS close to each other and GENERAL_REGS is after
   LEGACY_REGS.  */
static void
reorder_important_classes (void)
{
  int i;

  for (i = 0; i < N_REG_CLASSES; i++)
    allocno_class_order[i] = -1;
  for (i = 0; i < ira_allocno_classes_num; i++)
    allocno_class_order[ira_allocno_classes[i]] = i;
  qsort (ira_important_classes, ira_important_classes_num,
	 sizeof (enum reg_class), comp_reg_classes_func);
  for (i = 0; i < ira_important_classes_num; i++)
    ira_important_class_nums[ira_important_classes[i]] = i;
}

/* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
   IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
   IRA_REG_CLASSES_INTERSECT_P.  For the meaning of the relations,
   please see the corresponding comments in ira-int.h.  */
static void
setup_reg_class_relations (void)
{
  int i, cl1, cl2, cl3;
  HARD_REG_SET intersection_set, union_set, temp_set2;
  bool important_class_p[N_REG_CLASSES];

  memset (important_class_p, 0, sizeof (important_class_p));
  for (i = 0; i < ira_important_classes_num; i++)
    important_class_p[ira_important_classes[i]] = true;
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    {
      ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
      for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
	{
	  ira_reg_classes_intersect_p[cl1][cl2] = false;
	  ira_reg_class_intersect[cl1][cl2] = NO_REGS;
	  COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	  COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
	  if (hard_reg_set_empty_p (temp_hard_regset)
	      && hard_reg_set_empty_p (temp_set2))
	    {
	      /* Both classes have no allocatable hard registers
		 -- take all class hard registers into account and use
		 reg_class_subunion and reg_class_superunion.  */
	      for (i = 0;; i++)
		{
		  cl3 = reg_class_subclasses[cl1][i];
		  if (cl3 == LIM_REG_CLASSES)
		    break;
		  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
					  (enum reg_class) cl3))
		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
		}
	      ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
	      ira_reg_class_superunion[cl1][cl2]
		= reg_class_superunion[cl1][cl2];
	      continue;
	    }
	  ira_reg_classes_intersect_p[cl1][cl2]
	    = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
	  if (important_class_p[cl1] && important_class_p[cl2]
	      && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
	    {
	      /* CL1 and CL2 are important classes and the CL1
		 allocatable hard register set is inside of the CL2
		 allocatable hard registers -- make CL1 a superset of
		 CL2.  */
	      enum reg_class *p;

	      p = &ira_reg_class_super_classes[cl1][0];
	      while (*p != LIM_REG_CLASSES)
		p++;
	      *p++ = (enum reg_class) cl2;
	      *p = LIM_REG_CLASSES;
	    }
	  ira_reg_class_subunion[cl1][cl2] = NO_REGS;
	  ira_reg_class_superunion[cl1][cl2] = NO_REGS;
	  COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
	  AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
	  COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
	  IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
	  for (i = 0; i < ira_important_classes_num; i++)
	    {
	      cl3 = ira_important_classes[i];
	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	      if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
		{
		  /* The CL3 allocatable hard register set is inside
		     of the intersection of the allocatable hard
		     register sets of CL1 and CL2.  */
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int)
					ira_reg_class_intersect[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
		      /* If the allocatable hard register sets are the
			 same, prefer GENERAL_REGS or the smallest
			 class for debugging purposes.  */
		      || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
			  && (cl3 == GENERAL_REGS
			      || (ira_reg_class_intersect[cl1][cl2]
				    != GENERAL_REGS
				  && hard_reg_set_subset_p
				     (reg_class_contents[cl3],
				      reg_class_contents
				      [(int)
				       ira_reg_class_intersect[cl1][cl2]])))))
		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
		}
	      if (hard_reg_set_subset_p (temp_hard_regset, union_set))
		{
		  /* The CL3 allocatable hard register set is inside
		     of the union of the allocatable hard register
		     sets of CL1 and CL2.  */
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int)
					ira_reg_class_subunion[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
		      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
			  && (! hard_reg_set_equal_p (temp_set2,
						      temp_hard_regset)
			      || cl3 == GENERAL_REGS
			      /* If the allocatable hard register sets
				 are the same, prefer GENERAL_REGS or
				 the smallest class for debugging
				 purposes.  */
			      || (ira_reg_class_subunion[cl1][cl2]
				    != GENERAL_REGS
				  && hard_reg_set_subset_p
				     (reg_class_contents[cl3],
				      reg_class_contents
				      [(int)
				       ira_reg_class_subunion[cl1][cl2]])))))
		    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
		}
	      if (hard_reg_set_subset_p (union_set, temp_hard_regset))
		{
		  /* The CL3 allocatable hard register set contains the
		     union of the allocatable hard register sets of CL1
		     and CL2.  */
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int)
					ira_reg_class_superunion[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
		      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
			  && (! hard_reg_set_equal_p (temp_set2,
						      temp_hard_regset)
			      || cl3 == GENERAL_REGS
			      /* If the allocatable hard register sets
				 are the same, prefer GENERAL_REGS or
				 the smallest class for debugging
				 purposes.  */
			      || (ira_reg_class_superunion[cl1][cl2]
				    != GENERAL_REGS
				  && hard_reg_set_subset_p
				     (reg_class_contents[cl3],
				      reg_class_contents
				      [(int)
				       ira_reg_class_superunion[cl1][cl2]])))))
		    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
		}
	    }
	}
    }
}

/* Output all possible allocno classes and the translation map into
   file F.  */
static void
print_classes (FILE *f, bool pressure_p)
{
  int classes_num = (pressure_p
		     ? ira_pressure_classes_num : ira_allocno_classes_num);
  enum reg_class *classes = (pressure_p
			     ? ira_pressure_classes : ira_allocno_classes);
  enum reg_class *class_translate = (pressure_p
				     ? ira_pressure_class_translate
				     : ira_allocno_class_translate);
  static const char *const reg_class_names[] = REG_CLASS_NAMES;
  int i;

  fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
  for (i = 0; i < classes_num; i++)
    fprintf (f, " %s", reg_class_names[classes[i]]);
  fprintf (f, "\nClass translation:\n");
  for (i = 0; i < N_REG_CLASSES; i++)
    fprintf (f, " %s -> %s\n", reg_class_names[i],
	     reg_class_names[class_translate[i]]);
}

/* Output all possible allocno and translation classes and the
   translation maps into stderr.  */
void
ira_debug_allocno_classes (void)
{
  print_classes (stderr, false);
  print_classes (stderr, true);
}

/* Set up different arrays concerning class subsets, allocno and
   important classes.  */
static void
find_reg_classes (void)
{
  setup_allocno_and_important_classes ();
  setup_class_translate ();
  reorder_important_classes ();
  setup_reg_class_relations ();
}


/* Set up the array IRA_HARD_REGNO_ALLOCNO_CLASS.  */
static void
setup_hard_regno_aclass (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
#if 1
      ira_hard_regno_allocno_class[i]
	= (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
	   ? NO_REGS
	   : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
#else
      int j;
      enum reg_class cl;

      ira_hard_regno_allocno_class[i] = NO_REGS;
      for (j = 0; j < ira_allocno_classes_num; j++)
	{
	  cl = ira_allocno_classes[j];
	  if (ira_class_hard_reg_index[cl][i] >= 0)
	    {
	      ira_hard_regno_allocno_class[i] = cl;
	      break;
	    }
	}
#endif
    }
}


/* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
static void
setup_reg_class_nregs (void)
{
  int i, cl, cl2, m;

  for (m = 0; m < MAX_MACHINE_MODE; m++)
    {
      for (cl = 0; cl < N_REG_CLASSES; cl++)
	ira_reg_class_max_nregs[cl][m]
	  = ira_reg_class_min_nregs[cl][m]
	  = CLASS_MAX_NREGS ((enum reg_class) cl, (enum machine_mode) m);
      for (cl = 0; cl < N_REG_CLASSES; cl++)
	for (i = 0;
	     (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
	     i++)
	  if (ira_reg_class_min_nregs[cl2][m]
	      < ira_reg_class_min_nregs[cl][m])
	    ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
    }
}


/* Set up IRA_PROHIBITED_CLASS_MODE_REGS.  */
static void
setup_prohibited_class_mode_regs (void)
{
  int j, k, hard_regno, cl;

  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      for (j = 0; j < NUM_MACHINE_MODES; j++)
	{
	  CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
	  for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
	    {
	      hard_regno = ira_class_hard_regs[cl][k];
	      if (! HARD_REGNO_MODE_OK (hard_regno, (enum machine_mode) j))
		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
				  hard_regno);
	    }
	}
    }
}

/* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
   spanning from one register pressure class to another one.  It is
   called after defining the pressure classes.  */
static void
clarify_prohibited_class_mode_regs (void)
{
  int j, k, hard_regno, cl, pclass, nregs;

  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
	{
	  hard_regno = ira_class_hard_regs[cl][k];
	  if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
				 hard_regno))
	    continue;
	  nregs = hard_regno_nregs[hard_regno][j];
	  if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
	    {
	      SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
				hard_regno);
	      continue;
	    }
	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
	  for (nregs--; nregs >= 0; nregs--)
	    if (((enum reg_class) pclass
		 != ira_pressure_class_translate[REGNO_REG_CLASS
						 (hard_regno + nregs)]))
	      {
		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
				  hard_regno);
		break;
	      }
	}
}
1477 /* Allocate and initialize IRA_REGISTER_MOVE_COST,
1478 IRA_MAX_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST,
1479 IRA_MAY_MOVE_OUT_COST, IRA_MAX_MAY_MOVE_IN_COST, and
1480 IRA_MAX_MAY_MOVE_OUT_COST for MODE if it is not done yet. */
1481 void
1482 ira_init_register_move_cost (enum machine_mode mode)
1484 int cl1, cl2, cl3;
1486 ira_assert (ira_register_move_cost[mode] == NULL
1487 && ira_max_register_move_cost[mode] == NULL
1488 && ira_may_move_in_cost[mode] == NULL
1489 && ira_may_move_out_cost[mode] == NULL
1490 && ira_max_may_move_in_cost[mode] == NULL
1491 && ira_max_may_move_out_cost[mode] == NULL);
1492 if (move_cost[mode] == NULL)
1493 init_move_cost (mode);
1494 ira_register_move_cost[mode] = move_cost[mode];
1495 /* Don't use ira_allocate because the tables exist out of scope of a
1496 IRA call. */
1497 ira_max_register_move_cost[mode]
1498 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1499 memcpy (ira_max_register_move_cost[mode], ira_register_move_cost[mode],
1500 sizeof (move_table) * N_REG_CLASSES);
1501 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1503 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
1504 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1505 if (hard_reg_set_empty_p (temp_hard_regset))
1506 continue;
1507 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1508 if (hard_reg_set_subset_p (reg_class_contents[cl1],
1509 reg_class_contents[cl2]))
1510 for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
1512 if (ira_max_register_move_cost[mode][cl2][cl3]
1513 < ira_register_move_cost[mode][cl1][cl3])
1514 ira_max_register_move_cost[mode][cl2][cl3]
1515 = ira_register_move_cost[mode][cl1][cl3];
1516 if (ira_max_register_move_cost[mode][cl3][cl2]
1517 < ira_register_move_cost[mode][cl3][cl1])
1518 ira_max_register_move_cost[mode][cl3][cl2]
1519 = ira_register_move_cost[mode][cl3][cl1];
1522 ira_may_move_in_cost[mode]
1523 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1524 memcpy (ira_may_move_in_cost[mode], may_move_in_cost[mode],
1525 sizeof (move_table) * N_REG_CLASSES);
1526 ira_may_move_out_cost[mode]
1527 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1528 memcpy (ira_may_move_out_cost[mode], may_move_out_cost[mode],
1529 sizeof (move_table) * N_REG_CLASSES);
1530 ira_max_may_move_in_cost[mode]
1531 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1532 memcpy (ira_max_may_move_in_cost[mode], ira_max_register_move_cost[mode],
1533 sizeof (move_table) * N_REG_CLASSES);
1534 ira_max_may_move_out_cost[mode]
1535 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1536 memcpy (ira_max_may_move_out_cost[mode], ira_max_register_move_cost[mode],
1537 sizeof (move_table) * N_REG_CLASSES);
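/* When one class is a subset of the other, the move may be removed
   entirely (the value can live in the common registers), so the
   may-move costs are zeroed; the regular tables are then replaced
   by the conservative maxima computed above.  */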
1538 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1540 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1542 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl2]);
1543 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1544 if (hard_reg_set_empty_p (temp_hard_regset))
1545 continue;
1546 if (ira_class_subset_p[cl1][cl2])
1547 ira_may_move_in_cost[mode][cl1][cl2] = 0;
1548 if (ira_class_subset_p[cl2][cl1])
1549 ira_may_move_out_cost[mode][cl1][cl2] = 0;
1550 if (ira_class_subset_p[cl1][cl2])
1551 ira_max_may_move_in_cost[mode][cl1][cl2] = 0;
1552 if (ira_class_subset_p[cl2][cl1])
1553 ira_max_may_move_out_cost[mode][cl1][cl2] = 0;
1554 ira_register_move_cost[mode][cl1][cl2]
1555 = ira_max_register_move_cost[mode][cl1][cl2];
1556 ira_may_move_in_cost[mode][cl1][cl2]
1557 = ira_max_may_move_in_cost[mode][cl1][cl2];
1558 ira_may_move_out_cost[mode][cl1][cl2]
1559 = ira_max_may_move_out_cost[mode][cl1][cl2];
/* This is called once per compiler run.  It sets up the various
   arrays whose values do not depend on the function being
   compiled.  */
void
1569 void
1570 ira_init_once (void)
1572 int mode;
1574 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1576 ira_register_move_cost[mode] = NULL;
1577 ira_max_register_move_cost[mode] = NULL;
1578 ira_may_move_in_cost[mode] = NULL;
1579 ira_may_move_out_cost[mode] = NULL;
1580 ira_max_may_move_in_cost[mode] = NULL;
1581 ira_max_may_move_out_cost[mode] = NULL;
1583 ira_init_costs_once ();
1586 /* Free ira_max_register_move_cost, ira_may_move_in_cost,
1587 ira_may_move_out_cost, ira_max_may_move_in_cost, and
1588 ira_max_may_move_out_cost for each mode. */
1589 static void
1590 free_register_move_costs (void)
1592 int mode;
1594 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1596 free (ira_max_register_move_cost[mode]);
1597 free (ira_may_move_in_cost[mode]);
1598 free (ira_may_move_out_cost[mode]);
1599 free (ira_max_may_move_in_cost[mode]);
1600 free (ira_max_may_move_out_cost[mode]);
1601 ira_register_move_cost[mode] = NULL;
1602 ira_max_register_move_cost[mode] = NULL;
1603 ira_may_move_in_cost[mode] = NULL;
1604 ira_may_move_out_cost[mode] = NULL;
1605 ira_max_may_move_in_cost[mode] = NULL;
1606 ira_max_may_move_out_cost[mode] = NULL;
/* This is called every time the register-related information
   changes.  */
1612 void
1613 ira_init (void)
1615 free_register_move_costs ();
1616 setup_reg_mode_hard_regset ();
1617 setup_alloc_regs (flag_omit_frame_pointer != 0);
1618 setup_class_subset_and_memory_move_costs ();
1619 setup_reg_class_nregs ();
1620 setup_prohibited_class_mode_regs ();
1621 find_reg_classes ();
1622 clarify_prohibited_class_mode_regs ();
1623 setup_hard_regno_aclass ();
1624 ira_init_costs ();
/* Function called once at the end of the compiler's work.  */
1628 void
1629 ira_finish_once (void)
1631 ira_finish_costs_once ();
1632 free_register_move_costs ();
1636 #define ira_prohibited_mode_move_regs_initialized_p \
1637 (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)
1639 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS. */
1640 static void
1641 setup_prohibited_mode_move_regs (void)
1643 int i, j;
1644 rtx test_reg1, test_reg2, move_pat, move_insn;
1646 if (ira_prohibited_mode_move_regs_initialized_p)
1647 return;
1648 ira_prohibited_mode_move_regs_initialized_p = true;
1649 test_reg1 = gen_rtx_REG (VOIDmode, 0);
1650 test_reg2 = gen_rtx_REG (VOIDmode, 0);
1651 move_pat = gen_rtx_SET (VOIDmode, test_reg1, test_reg2);
1652 move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, move_pat, 0, -1, 0);
1653 for (i = 0; i < NUM_MACHINE_MODES; i++)
1655 SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1656 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1658 if (! HARD_REGNO_MODE_OK (j, (enum machine_mode) i))
1659 continue;
1660 SET_REGNO_RAW (test_reg1, j);
1661 PUT_MODE (test_reg1, (enum machine_mode) i);
1662 SET_REGNO_RAW (test_reg2, j);
1663 PUT_MODE (test_reg2, (enum machine_mode) i);
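/* Re-recognize the reg-reg move in mode I between hard register J
   and itself; if no move pattern matches, J stays prohibited for
   mode I.  */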
1664 INSN_CODE (move_insn) = -1;
1665 recog_memoized (move_insn);
1666 if (INSN_CODE (move_insn) < 0)
1667 continue;
1668 extract_insn (move_insn);
1669 if (! constrain_operands (1))
1670 continue;
1671 CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
1678 /* Return nonzero if REGNO is a particularly bad choice for reloading X. */
1679 static bool
1680 ira_bad_reload_regno_1 (int regno, rtx x)
1682 int x_regno, n, i;
1683 ira_allocno_t a;
1684 enum reg_class pref;
1686 /* We only deal with pseudo regs. */
1687 if (! x || GET_CODE (x) != REG)
1688 return false;
1690 x_regno = REGNO (x);
1691 if (x_regno < FIRST_PSEUDO_REGISTER)
1692 return false;
1694 /* If the pseudo prefers REGNO explicitly, then do not consider
1695 REGNO a bad spill choice. */
1696 pref = reg_preferred_class (x_regno);
1697 if (reg_class_size[pref] == 1)
1698 return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);
1700 /* If the pseudo conflicts with REGNO, then we consider REGNO a
1701 poor choice for a reload regno. */
1702 a = ira_regno_allocno_map[x_regno];
1703 n = ALLOCNO_NUM_OBJECTS (a);
1704 for (i = 0; i < n; i++)
1706 ira_object_t obj = ALLOCNO_OBJECT (a, i);
1707 if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
1708 return true;
1710 return false;
1713 /* Return nonzero if REGNO is a particularly bad choice for reloading
1714 IN or OUT. */
1715 bool
1716 ira_bad_reload_regno (int regno, rtx in, rtx out)
1718 return (ira_bad_reload_regno_1 (regno, in)
1719 || ira_bad_reload_regno_1 (regno, out));
1722 /* Return TRUE if *LOC contains an asm. */
1723 static int
1724 insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
if (! *loc)
1727 return FALSE;
1728 if (GET_CODE (*loc) == ASM_OPERANDS)
1729 return TRUE;
1730 return FALSE;
1734 /* Return TRUE if INSN contains an ASM. */
1735 static bool
1736 insn_contains_asm (rtx insn)
1738 return for_each_rtx (&insn, insn_contains_asm_1, NULL);
1741 /* Add register clobbers from asm statements. */
1742 static void
1743 compute_regs_asm_clobbered (void)
1745 basic_block bb;
1747 FOR_EACH_BB (bb)
1749 rtx insn;
1750 FOR_BB_INSNS_REVERSE (bb, insn)
1752 df_ref *def_rec;
1754 if (insn_contains_asm (insn))
1755 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
1757 df_ref def = *def_rec;
1758 unsigned int dregno = DF_REF_REGNO (def);
1759 if (HARD_REGISTER_NUM_P (dregno))
1760 add_to_hard_reg_set (&crtl->asm_clobbers,
1761 GET_MODE (DF_REF_REAL_REG (def)),
1762 dregno);
1769 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE. */
1770 void
1771 ira_setup_eliminable_regset (void)
1773 #ifdef ELIMINABLE_REGS
1774 int i;
1775 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
1776 #endif
1777 /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
1778 sp for alloca. So we can't eliminate the frame pointer in that
1779 case. At some point, we should improve this by emitting the
1780 sp-adjusting insns for this case. */
1781 int need_fp
1782 = (! flag_omit_frame_pointer
1783 || (cfun->calls_alloca && EXIT_IGNORE_STACK)
1784 /* We need the frame pointer to catch stack overflow exceptions
1785 if the stack pointer is moving. */
1786 || (flag_stack_check && STACK_CHECK_MOVING_SP)
1787 || crtl->accesses_prior_frames
1788 || crtl->stack_realign_needed
1789 || targetm.frame_pointer_required ());
1791 frame_pointer_needed = need_fp;
1793 COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
1794 CLEAR_HARD_REG_SET (eliminable_regset);
1796 compute_regs_asm_clobbered ();
1798 /* Build the regset of all eliminable registers and show we can't
1799 use those that we already know won't be eliminated. */
1800 #ifdef ELIMINABLE_REGS
1801 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
1803 bool cannot_elim
1804 = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
1805 || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp));
1807 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
1809 SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
1811 if (cannot_elim)
1812 SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
1814 else if (cannot_elim)
1815 error ("%s cannot be used in asm here",
1816 reg_names[eliminables[i].from]);
1817 else
1818 df_set_regs_ever_live (eliminables[i].from, true);
1820 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1821 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
1823 SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
1824 if (need_fp)
1825 SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
1827 else if (need_fp)
1828 error ("%s cannot be used in asm here",
1829 reg_names[HARD_FRAME_POINTER_REGNUM]);
1830 else
1831 df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
1832 #endif
1834 #else
1835 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
1837 SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
1838 if (need_fp)
1839 SET_HARD_REG_BIT (ira_no_alloc_regs, FRAME_POINTER_REGNUM);
1841 else if (need_fp)
1842 error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
1843 else
1844 df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
1845 #endif
1850 /* The length of the following two arrays. */
1851 int ira_reg_equiv_len;
1853 /* The element value is TRUE if the corresponding regno value is
1854 invariant. */
1855 bool *ira_reg_equiv_invariant_p;
/* The element value is the equivalent constant of the given
   pseudo-register, or NULL_RTX.  */
1859 rtx *ira_reg_equiv_const;
1861 /* Set up the two arrays declared above. */
1862 static void
1863 find_reg_equiv_invariant_const (void)
1865 unsigned int i;
1866 bool invariant_p;
1867 rtx list, insn, note, constant, x;
1869 for (i = FIRST_PSEUDO_REGISTER; i < VEC_length (reg_equivs_t, reg_equivs); i++)
1871 constant = NULL_RTX;
1872 invariant_p = false;
1873 for (list = reg_equiv_init (i); list != NULL_RTX; list = XEXP (list, 1))
1875 insn = XEXP (list, 0);
1876 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
1878 if (note == NULL_RTX)
1879 continue;
1881 x = XEXP (note, 0);
1883 if (! CONSTANT_P (x)
1884 || ! flag_pic || LEGITIMATE_PIC_OPERAND_P (x))
1886 /* It can happen that a REG_EQUIV note contains a MEM
1887 that is not a legitimate memory operand. As later
1888 stages of the reload assume that all addresses found
1889 in the reg_equiv_* arrays were originally legitimate,
1890 we ignore such REG_EQUIV notes. */
1891 if (memory_operand (x, VOIDmode))
1892 invariant_p = MEM_READONLY_P (x);
1893 else if (function_invariant_p (x))
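/* Expressions based on the frame or argument pointer become
   constant only after register elimination, so they are recorded
   as invariants rather than as constants.  */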
1895 if (GET_CODE (x) == PLUS
1896 || x == frame_pointer_rtx || x == arg_pointer_rtx)
1897 invariant_p = true;
1898 else
1899 constant = x;
1903 ira_reg_equiv_invariant_p[i] = invariant_p;
1904 ira_reg_equiv_const[i] = constant;
1910 /* Vector of substitutions of register numbers,
1911 used to map pseudo regs into hardware regs.
1912 This is set up as a result of register allocation.
1913 Element N is the hard reg assigned to pseudo reg N,
1914 or is -1 if no hard reg was assigned.
1915 If N is a hard reg number, element N is N. */
1916 short *reg_renumber;
1918 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
1919 the allocation found by IRA. */
1920 static void
1921 setup_reg_renumber (void)
1923 int regno, hard_regno;
1924 ira_allocno_t a;
1925 ira_allocno_iterator ai;
1927 caller_save_needed = 0;
1928 FOR_EACH_ALLOCNO (a, ai)
1930 /* There are no caps at this point. */
1931 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1932 if (! ALLOCNO_ASSIGNED_P (a))
/* This can happen if A is not referenced but is partially anticipated
   somewhere in a region.  */
1935 ALLOCNO_ASSIGNED_P (a) = true;
1936 ira_free_allocno_updated_costs (a);
1937 hard_regno = ALLOCNO_HARD_REGNO (a);
1938 regno = ALLOCNO_REGNO (a);
1939 reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
1940 if (hard_regno >= 0)
1942 int i, nwords;
1943 enum reg_class pclass;
1944 ira_object_t obj;
1946 pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
1947 nwords = ALLOCNO_NUM_OBJECTS (a);
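/* Record every hard register outside the assigned register's
   pressure class as conflicting with the allocno's objects.  */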
1948 for (i = 0; i < nwords; i++)
1950 obj = ALLOCNO_OBJECT (a, i);
1951 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
1952 reg_class_contents[pclass]);
1954 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
1955 && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1956 call_used_reg_set))
1958 ira_assert (!optimize || flag_caller_saves
1959 || regno >= ira_reg_equiv_len
1960 || ira_reg_equiv_const[regno]
1961 || ira_reg_equiv_invariant_p[regno]);
1962 caller_save_needed = 1;
1968 /* Set up allocno assignment flags for further allocation
1969 improvements. */
1970 static void
1971 setup_allocno_assignment_flags (void)
1973 int hard_regno;
1974 ira_allocno_t a;
1975 ira_allocno_iterator ai;
1977 FOR_EACH_ALLOCNO (a, ai)
1979 if (! ALLOCNO_ASSIGNED_P (a))
/* This can happen if A is not referenced but is partially anticipated
   somewhere in a region.  */
1982 ira_free_allocno_updated_costs (a);
1983 hard_regno = ALLOCNO_HARD_REGNO (a);
/* Don't assign hard registers to allocnos which are the destinations
   of stores removed at the end of a loop.  It makes no sense to keep
   the same value in different hard registers.  It is also impossible
   to assign hard registers correctly to such allocnos because the
   cost info and the info about intersected calls are incorrect for
   them.  */
1990 ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
1991 || ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
1992 || (ALLOCNO_MEMORY_COST (a)
1993 - ALLOCNO_CLASS_COST (a)) < 0);
1994 ira_assert (hard_regno < 0
1995 || ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1996 reg_class_contents
1997 [ALLOCNO_CLASS (a)]));
2001 /* Evaluate overall allocation cost and the costs for using hard
2002 registers and memory for allocnos. */
2003 static void
2004 calculate_allocation_cost (void)
2006 int hard_regno, cost;
2007 ira_allocno_t a;
2008 ira_allocno_iterator ai;
2010 ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
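/* An allocno left in memory contributes its memory cost; one that
   got a hard register contributes the recorded cost of that
   particular register, or the class cost if no per-register costs
   were kept.  */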
2011 FOR_EACH_ALLOCNO (a, ai)
2013 hard_regno = ALLOCNO_HARD_REGNO (a);
2014 ira_assert (hard_regno < 0
2015 || ! ira_hard_reg_not_in_set_p
2016 (hard_regno, ALLOCNO_MODE (a),
2017 reg_class_contents[ALLOCNO_CLASS (a)]));
2018 if (hard_regno < 0)
2020 cost = ALLOCNO_MEMORY_COST (a);
2021 ira_mem_cost += cost;
2023 else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
2025 cost = (ALLOCNO_HARD_REG_COSTS (a)
2026 [ira_class_hard_reg_index
2027 [ALLOCNO_CLASS (a)][hard_regno]]);
2028 ira_reg_cost += cost;
2030 else
2032 cost = ALLOCNO_CLASS_COST (a);
2033 ira_reg_cost += cost;
2035 ira_overall_cost += cost;
2038 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2040 fprintf (ira_dump_file,
2041 "+++Costs: overall %d, reg %d, mem %d, ld %d, st %d, move %d\n",
2042 ira_overall_cost, ira_reg_cost, ira_mem_cost,
2043 ira_load_cost, ira_store_cost, ira_shuffle_cost);
2044 fprintf (ira_dump_file, "+++ move loops %d, new jumps %d\n",
2045 ira_move_loops_num, ira_additional_jumps_num);
2050 #ifdef ENABLE_IRA_CHECKING
/* Check the correctness of the allocation.  We need this because of
   the complicated code that transforms an internal representation
   with more than one region into a one-region representation.  */
2054 static void
2055 check_allocation (void)
2057 ira_allocno_t a;
2058 int hard_regno, nregs, conflict_nregs;
2059 ira_allocno_iterator ai;
2061 FOR_EACH_ALLOCNO (a, ai)
2063 int n = ALLOCNO_NUM_OBJECTS (a);
2064 int i;
2066 if (ALLOCNO_CAP_MEMBER (a) != NULL
2067 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
2068 continue;
2069 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
2070 if (nregs == 1)
2071 /* We allocated a single hard register. */
2072 n = 1;
2073 else if (n > 1)
2074 /* We allocated multiple hard registers, and we will test
2075 conflicts in a granularity of single hard regs. */
2076 nregs = 1;
2078 for (i = 0; i < n; i++)
2080 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2081 ira_object_t conflict_obj;
2082 ira_object_conflict_iterator oci;
2083 int this_regno = hard_regno;
2084 if (n > 1)
2086 if (WORDS_BIG_ENDIAN)
2087 this_regno += n - i - 1;
2088 else
2089 this_regno += i;
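/* Verify that no conflicting allocno was given an overlapping range
   of hard registers.  */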
2091 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2093 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2094 int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
2095 if (conflict_hard_regno < 0)
2096 continue;
2098 conflict_nregs
2099 = (hard_regno_nregs
2100 [conflict_hard_regno][ALLOCNO_MODE (conflict_a)]);
2102 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
2103 && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
2105 if (WORDS_BIG_ENDIAN)
2106 conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
2107 - OBJECT_SUBWORD (conflict_obj) - 1);
2108 else
2109 conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
2110 conflict_nregs = 1;
2113 if ((conflict_hard_regno <= this_regno
2114 && this_regno < conflict_hard_regno + conflict_nregs)
2115 || (this_regno <= conflict_hard_regno
2116 && conflict_hard_regno < this_regno + nregs))
2118 fprintf (stderr, "bad allocation for %d and %d\n",
2119 ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
2120 gcc_unreachable ();
2126 #endif
2128 /* Fix values of array REG_EQUIV_INIT after live range splitting done
2129 by IRA. */
2130 static void
2131 fix_reg_equiv_init (void)
2133 unsigned int max_regno = max_reg_num ();
2134 int i, new_regno, max;
2135 rtx x, prev, next, insn, set;
2137 if (VEC_length (reg_equivs_t, reg_equivs) < max_regno)
2139 max = VEC_length (reg_equivs_t, reg_equivs);
2140 grow_reg_equivs ();
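/* Walk each pseudo's equiv-init list and move every entry that now
   refers to a new pseudo (created by live range splitting) onto that
   new pseudo's list.  */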
2141 for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
2142 for (prev = NULL_RTX, x = reg_equiv_init (i);
2143 x != NULL_RTX;
2144 x = next)
2146 next = XEXP (x, 1);
2147 insn = XEXP (x, 0);
2148 set = single_set (insn);
2149 ira_assert (set != NULL_RTX
2150 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
2151 if (REG_P (SET_DEST (set))
2152 && ((int) REGNO (SET_DEST (set)) == i
2153 || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
2154 new_regno = REGNO (SET_DEST (set));
2155 else if (REG_P (SET_SRC (set))
2156 && ((int) REGNO (SET_SRC (set)) == i
2157 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
2158 new_regno = REGNO (SET_SRC (set));
2159 else
2160 gcc_unreachable ();
2161 if (new_regno == i)
2162 prev = x;
2163 else
2165 if (prev == NULL_RTX)
2166 reg_equiv_init (i) = next;
2167 else
2168 XEXP (prev, 1) = next;
2169 XEXP (x, 1) = reg_equiv_init (new_regno);
2170 reg_equiv_init (new_regno) = x;
2176 #ifdef ENABLE_IRA_CHECKING
2177 /* Print redundant memory-memory copies. */
2178 static void
2179 print_redundant_copies (void)
2181 int hard_regno;
2182 ira_allocno_t a;
2183 ira_copy_t cp, next_cp;
2184 ira_allocno_iterator ai;
2186 FOR_EACH_ALLOCNO (a, ai)
2188 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2189 /* It is a cap. */
2190 continue;
2191 hard_regno = ALLOCNO_HARD_REGNO (a);
2192 if (hard_regno >= 0)
2193 continue;
2194 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2195 if (cp->first == a)
2196 next_cp = cp->next_first_allocno_copy;
2197 else
2199 next_cp = cp->next_second_allocno_copy;
2200 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
2201 && cp->insn != NULL_RTX
2202 && ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
2203 fprintf (ira_dump_file,
2204 " Redundant move from %d(freq %d):%d\n",
2205 INSN_UID (cp->insn), cp->freq, hard_regno);
2209 #endif
/* Set up the preferred and alternative classes for the new
   pseudo-registers created by IRA, starting with START.  */
2213 static void
2214 setup_preferred_alternate_classes_for_new_pseudos (int start)
2216 int i, old_regno;
2217 int max_regno = max_reg_num ();
2219 for (i = start; i < max_regno; i++)
2221 old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
2222 ira_assert (i != old_regno);
2223 setup_reg_classes (i, reg_preferred_class (old_regno),
2224 reg_alternate_class (old_regno),
2225 reg_allocno_class (old_regno));
2226 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2227 fprintf (ira_dump_file,
2228 " New r%d: setting preferred %s, alternative %s\n",
2229 i, reg_class_names[reg_preferred_class (old_regno)],
2230 reg_class_names[reg_alternate_class (old_regno)]);
2236 /* Regional allocation can create new pseudo-registers. This function
2237 expands some arrays for pseudo-registers. */
2238 static void
2239 expand_reg_info (int old_size)
2241 int i;
2242 int size = max_reg_num ();
2244 resize_reg_info ();
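/* Until real information is computed, give each new pseudo the
   default classes.  */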
2245 for (i = old_size; i < size; i++)
2246 setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
/* Return TRUE if the register pressure in the function is too high.
   This is used to decide whether stack slot sharing is worth doing.  */
2251 static bool
2252 too_high_register_pressure_p (void)
2254 int i;
2255 enum reg_class pclass;
2257 for (i = 0; i < ira_pressure_classes_num; i++)
2259 pclass = ira_pressure_classes[i];
2260 if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
2261 return true;
2263 return false;
2268 /* Indicate that hard register number FROM was eliminated and replaced with
2269 an offset from hard register number TO. The status of hard registers live
2270 at the start of a basic block is updated by replacing a use of FROM with
2271 a use of TO. */
2273 void
2274 mark_elimination (int from, int to)
2276 basic_block bb;
2278 FOR_EACH_BB (bb)
2280 /* We don't use LIVE info in IRA. */
2281 bitmap r = DF_LR_IN (bb);
2283 if (REGNO_REG_SET_P (r, from))
2285 CLEAR_REGNO_REG_SET (r, from);
2286 SET_REGNO_REG_SET (r, to);
2293 struct equivalence
/* Set when a REG_EQUIV note is found or created.  Used to keep
   track of what memory accesses might be created later, e.g. by
   reload.  */
2298 rtx replacement;
2299 rtx *src_p;
2300 /* The list of each instruction which initializes this register. */
2301 rtx init_insns;
2302 /* Loop depth is used to recognize equivalences which appear
2303 to be present within the same loop (or in an inner loop). */
2304 int loop_depth;
2305 /* Nonzero if this had a preexisting REG_EQUIV note. */
2306 int is_arg_equivalence;
2307 /* Set when an attempt should be made to replace a register
2308 with the associated src_p entry. */
2309 char replace;
2312 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
2313 structure for that register. */
2314 static struct equivalence *reg_equiv;
2316 /* Used for communication between the following two functions: contains
2317 a MEM that we wish to ensure remains unchanged. */
2318 static rtx equiv_mem;
2320 /* Set nonzero if EQUIV_MEM is modified. */
2321 static int equiv_mem_modified;
2323 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
2324 Called via note_stores. */
2325 static void
2326 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
2327 void *data ATTRIBUTE_UNUSED)
2329 if ((REG_P (dest)
2330 && reg_overlap_mentioned_p (dest, equiv_mem))
2331 || (MEM_P (dest)
2332 && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
2333 equiv_mem_modified = 1;
2336 /* Verify that no store between START and the death of REG invalidates
2337 MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
2338 by storing into an overlapping memory location, or with a non-const
2339 CALL_INSN.
2341 Return 1 if MEMREF remains valid. */
2342 static int
2343 validate_equiv_mem (rtx start, rtx reg, rtx memref)
2345 rtx insn;
2346 rtx note;
2348 equiv_mem = memref;
2349 equiv_mem_modified = 0;
2351 /* If the memory reference has side effects or is volatile, it isn't a
2352 valid equivalence. */
2353 if (side_effects_p (memref))
2354 return 0;
2356 for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
2358 if (! INSN_P (insn))
2359 continue;
2361 if (find_reg_note (insn, REG_DEAD, reg))
2362 return 1;
2364 /* This used to ignore readonly memory and const/pure calls. The problem
2365 is the equivalent form may reference a pseudo which gets assigned a
call-clobbered hard reg.  When we later replace REG with its
2367 equivalent form, the value in the call-clobbered reg has been
2368 changed and all hell breaks loose. */
2369 if (CALL_P (insn))
2370 return 0;
2372 note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
2374 /* If a register mentioned in MEMREF is modified via an
2375 auto-increment, we lose the equivalence. Do the same if one
2376 dies; although we could extend the life, it doesn't seem worth
2377 the trouble. */
2379 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2380 if ((REG_NOTE_KIND (note) == REG_INC
2381 || REG_NOTE_KIND (note) == REG_DEAD)
2382 && REG_P (XEXP (note, 0))
2383 && reg_overlap_mentioned_p (XEXP (note, 0), memref))
2384 return 0;
2387 return 0;
/* Return nonzero if X varies; return zero if X is known to be
   invariant.  */
2391 static int
2392 equiv_init_varies_p (rtx x)
2394 RTX_CODE code = GET_CODE (x);
2395 int i;
2396 const char *fmt;
2398 switch (code)
2400 case MEM:
2401 return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
2403 case CONST:
2404 case CONST_INT:
2405 case CONST_DOUBLE:
2406 case CONST_FIXED:
2407 case CONST_VECTOR:
2408 case SYMBOL_REF:
2409 case LABEL_REF:
2410 return 0;
2412 case REG:
2413 return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
2415 case ASM_OPERANDS:
2416 if (MEM_VOLATILE_P (x))
2417 return 1;
2419 /* Fall through. */
2421 default:
2422 break;
2425 fmt = GET_RTX_FORMAT (code);
2426 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2427 if (fmt[i] == 'e')
2429 if (equiv_init_varies_p (XEXP (x, i)))
2430 return 1;
2432 else if (fmt[i] == 'E')
2434 int j;
2435 for (j = 0; j < XVECLEN (x, i); j++)
2436 if (equiv_init_varies_p (XVECEXP (x, i, j)))
2437 return 1;
2440 return 0;
/* Return nonzero if X (used to initialize register REGNO) is movable.
   X is only movable if the registers it uses have equivalent
   initializations which appear to be within the same loop (or in an
   inner loop) and are movable, or if they are not candidates for
   local_alloc and don't vary.  */
2447 static int
2448 equiv_init_movable_p (rtx x, int regno)
2450 int i, j;
2451 const char *fmt;
2452 enum rtx_code code = GET_CODE (x);
2454 switch (code)
2456 case SET:
2457 return equiv_init_movable_p (SET_SRC (x), regno);
2459 case CC0:
2460 case CLOBBER:
2461 return 0;
2463 case PRE_INC:
2464 case PRE_DEC:
2465 case POST_INC:
2466 case POST_DEC:
2467 case PRE_MODIFY:
2468 case POST_MODIFY:
2469 return 0;
2471 case REG:
2472 return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
2473 && reg_equiv[REGNO (x)].replace)
2474 || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS
2475 && ! rtx_varies_p (x, 0)));
2477 case UNSPEC_VOLATILE:
2478 return 0;
2480 case ASM_OPERANDS:
2481 if (MEM_VOLATILE_P (x))
2482 return 0;
2484 /* Fall through. */
2486 default:
2487 break;
2490 fmt = GET_RTX_FORMAT (code);
2491 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2492 switch (fmt[i])
2494 case 'e':
2495 if (! equiv_init_movable_p (XEXP (x, i), regno))
2496 return 0;
2497 break;
2498 case 'E':
2499 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2500 if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
2501 return 0;
2502 break;
2505 return 1;
2508 /* TRUE if X uses any registers for which reg_equiv[REGNO].replace is
2509 true. */
2510 static int
2511 contains_replace_regs (rtx x)
2513 int i, j;
2514 const char *fmt;
2515 enum rtx_code code = GET_CODE (x);
2517 switch (code)
2519 case CONST_INT:
2520 case CONST:
2521 case LABEL_REF:
2522 case SYMBOL_REF:
2523 case CONST_DOUBLE:
2524 case CONST_FIXED:
2525 case CONST_VECTOR:
2526 case PC:
2527 case CC0:
2528 case HIGH:
2529 return 0;
2531 case REG:
2532 return reg_equiv[REGNO (x)].replace;
2534 default:
2535 break;
2538 fmt = GET_RTX_FORMAT (code);
2539 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2540 switch (fmt[i])
2542 case 'e':
2543 if (contains_replace_regs (XEXP (x, i)))
2544 return 1;
2545 break;
2546 case 'E':
2547 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2548 if (contains_replace_regs (XVECEXP (x, i, j)))
2549 return 1;
2550 break;
2553 return 0;
2556 /* TRUE if X references a memory location that would be affected by a store
2557 to MEMREF. */
2558 static int
2559 memref_referenced_p (rtx memref, rtx x)
2561 int i, j;
2562 const char *fmt;
2563 enum rtx_code code = GET_CODE (x);
2565 switch (code)
2567 case CONST_INT:
2568 case CONST:
2569 case LABEL_REF:
2570 case SYMBOL_REF:
2571 case CONST_DOUBLE:
2572 case CONST_FIXED:
2573 case CONST_VECTOR:
2574 case PC:
2575 case CC0:
2576 case HIGH:
2577 case LO_SUM:
2578 return 0;
2580 case REG:
2581 return (reg_equiv[REGNO (x)].replacement
2582 && memref_referenced_p (memref,
2583 reg_equiv[REGNO (x)].replacement));
2585 case MEM:
2586 if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
2587 return 1;
2588 break;
2590 case SET:
2591 /* If we are setting a MEM, it doesn't count (its address does), but any
2592 other SET_DEST that has a MEM in it is referencing the MEM. */
2593 if (MEM_P (SET_DEST (x)))
2595 if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
2596 return 1;
2598 else if (memref_referenced_p (memref, SET_DEST (x)))
2599 return 1;
2601 return memref_referenced_p (memref, SET_SRC (x));
2603 default:
2604 break;
2607 fmt = GET_RTX_FORMAT (code);
2608 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2609 switch (fmt[i])
2611 case 'e':
2612 if (memref_referenced_p (memref, XEXP (x, i)))
2613 return 1;
2614 break;
2615 case 'E':
2616 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2617 if (memref_referenced_p (memref, XVECEXP (x, i, j)))
2618 return 1;
2619 break;
2622 return 0;
2625 /* TRUE if some insn in the range (START, END] references a memory location
2626 that would be affected by a store to MEMREF. */
2627 static int
2628 memref_used_between_p (rtx memref, rtx start, rtx end)
2630 rtx insn;
2632 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2633 insn = NEXT_INSN (insn))
2635 if (!NONDEBUG_INSN_P (insn))
2636 continue;
2638 if (memref_referenced_p (memref, PATTERN (insn)))
2639 return 1;
2641 /* Nonconst functions may access memory. */
2642 if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
2643 return 1;
2646 return 0;
2649 /* Mark REG as having no known equivalence.
2650 Some instructions might have been processed before and furnished
2651 with REG_EQUIV notes for this register; these notes will have to be
2652 removed.
2653 STORE is the piece of RTL that does the non-constant / conflicting
2654 assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
2655 but needs to be there because this function is called from note_stores. */
2656 static void
2657 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
2658 void *data ATTRIBUTE_UNUSED)
2660 int regno;
2661 rtx list;
2663 if (!REG_P (reg))
2664 return;
2665 regno = REGNO (reg);
2666 list = reg_equiv[regno].init_insns;
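/* const0_rtx is used as a marker meaning that no-equivalence has
   already been recorded for this register.  */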
2667 if (list == const0_rtx)
2668 return;
2669 reg_equiv[regno].init_insns = const0_rtx;
2670 reg_equiv[regno].replacement = NULL_RTX;
/* This doesn't matter for equivalences made for argument registers;
   we should keep their initialization insns.  */
2673 if (reg_equiv[regno].is_arg_equivalence)
2674 return;
2675 reg_equiv_init (regno) = NULL_RTX;
2676 for (; list; list = XEXP (list, 1))
2678 rtx insn = XEXP (list, 0);
2679 remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
/* In a DEBUG_INSN location, replace REGs that are in the CLEARED_REGS
   bitmap with their equivalent expressions.  */
2686 static rtx
2687 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
2689 if (REG_P (loc))
2691 bitmap cleared_regs = (bitmap) data;
2692 if (bitmap_bit_p (cleared_regs, REGNO (loc)))
2693 return simplify_replace_fn_rtx (*reg_equiv[REGNO (loc)].src_p,
2694 NULL_RTX, adjust_cleared_regs, data);
2696 return NULL_RTX;
2699 /* Nonzero if we recorded an equivalence for a LABEL_REF. */
2700 static int recorded_label_ref;
2702 /* Find registers that are equivalent to a single value throughout the
2703 compilation (either because they can be referenced in memory or are
set once from a single constant).  Lower their priority for
getting a hard register.
2707 If such a register is only referenced once, try substituting its
2708 value into the using insn. If it succeeds, we can eliminate the
2709 register completely.
2711 Initialize the REG_EQUIV_INIT array of initializing insns.
2713 Return non-zero if jump label rebuilding should be done. */
2714 static int
2715 update_equiv_regs (void)
2717 rtx insn;
2718 basic_block bb;
2719 int loop_depth;
2720 bitmap cleared_regs;
2722 /* We need to keep track of whether or not we recorded a LABEL_REF so
2723 that we know if the jump optimizer needs to be rerun. */
2724 recorded_label_ref = 0;
2726 reg_equiv = XCNEWVEC (struct equivalence, max_regno);
2727 grow_reg_equivs ();
2729 init_alias_analysis ();
2731 /* Scan the insns and find which registers have equivalences. Do this
2732 in a separate scan of the insns because (due to -fcse-follow-jumps)
2733 a register can be set below its use. */
2734 FOR_EACH_BB (bb)
2736 loop_depth = bb->loop_depth;
2738 for (insn = BB_HEAD (bb);
2739 insn != NEXT_INSN (BB_END (bb));
2740 insn = NEXT_INSN (insn))
2742 rtx note;
2743 rtx set;
2744 rtx dest, src;
2745 int regno;
2747 if (! INSN_P (insn))
2748 continue;
2750 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2751 if (REG_NOTE_KIND (note) == REG_INC)
2752 no_equiv (XEXP (note, 0), note, NULL);
2754 set = single_set (insn);
2756 /* If this insn contains more (or less) than a single SET,
2757 only mark all destinations as having no known equivalence. */
2758 if (set == 0)
2760 note_stores (PATTERN (insn), no_equiv, NULL);
2761 continue;
2763 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2765 int i;
2767 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2769 rtx part = XVECEXP (PATTERN (insn), 0, i);
2770 if (part != set)
2771 note_stores (part, no_equiv, NULL);
2775 dest = SET_DEST (set);
2776 src = SET_SRC (set);
2778 /* See if this is setting up the equivalence between an argument
2779 register and its stack slot. */
2780 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2781 if (note)
2783 gcc_assert (REG_P (dest));
2784 regno = REGNO (dest);
2786 /* Note that we don't want to clear reg_equiv_init even if there
2787 are multiple sets of this register. */
2788 reg_equiv[regno].is_arg_equivalence = 1;
2790 /* Record for reload that this is an equivalencing insn. */
2791 if (rtx_equal_p (src, XEXP (note, 0)))
2792 reg_equiv_init (regno)
2793 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
2795 /* Continue normally in case this is a candidate for
2796 replacements. */
2799 if (!optimize)
2800 continue;
2802 /* We only handle the case of a pseudo register being set
2803 once, or always to the same value. */
2804 /* ??? The mn10200 port breaks if we add equivalences for
2805 values that need an ADDRESS_REGS register and set them equivalent
2806 to a MEM of a pseudo. The actual problem is in the over-conservative
2807 handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
2808 calculate_needs, but we traditionally work around this problem
2809 here by rejecting equivalences when the destination is in a register
2810 that's likely spilled. This is fragile, of course, since the
2811 preferred class of a pseudo depends on all instructions that set
2812 or use it. */
2814 if (!REG_P (dest)
2815 || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
2816 || reg_equiv[regno].init_insns == const0_rtx
2817 || (targetm.class_likely_spilled_p (reg_preferred_class (regno))
2818 && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
2820 /* This might be setting a SUBREG of a pseudo, a pseudo that is
2821 also set somewhere else to a constant. */
2822 note_stores (set, no_equiv, NULL);
2823 continue;
2826 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
2828 /* cse sometimes generates function invariants, but doesn't put a
2829 REG_EQUAL note on the insn. Since this note would be redundant,
2830 there's no point creating it earlier than here. */
2831 if (! note && ! rtx_varies_p (src, 0))
2832 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
/* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
   since it represents a function call.  */
2836 if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
2837 note = NULL_RTX;
2839 if (DF_REG_DEF_COUNT (regno) != 1
2840 && (! note
2841 || rtx_varies_p (XEXP (note, 0), 0)
2842 || (reg_equiv[regno].replacement
2843 && ! rtx_equal_p (XEXP (note, 0),
2844 reg_equiv[regno].replacement))))
2846 no_equiv (dest, set, NULL);
2847 continue;
2849 /* Record this insn as initializing this register. */
2850 reg_equiv[regno].init_insns
2851 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
2853 /* If this register is known to be equal to a constant, record that
2854 it is always equivalent to the constant. */
2855 if (DF_REG_DEF_COUNT (regno) == 1
2856 && note && ! rtx_varies_p (XEXP (note, 0), 0))
2858 rtx note_value = XEXP (note, 0);
2859 remove_note (insn, note);
2860 set_unique_reg_note (insn, REG_EQUIV, note_value);
2863 /* If this insn introduces a "constant" register, decrease the priority
2864 of that register. Record this insn if the register is only used once
2865 more and the equivalence value is the same as our source.
2867 The latter condition is checked for two reasons: First, it is an
2868 indication that it may be more efficient to actually emit the insn
2869 as written (if no registers are available, reload will substitute
2870 the equivalence). Secondly, it avoids problems with any registers
2871 dying in this insn whose death notes would be missed.
2873 If we don't have a REG_EQUIV note, see if this insn is loading
2874 a register used only in one basic block from a MEM. If so, and the
2875 MEM remains unchanged for the life of the register, add a REG_EQUIV
2876 note. */
2878 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2880 if (note == 0 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2881 && MEM_P (SET_SRC (set))
2882 && validate_equiv_mem (insn, dest, SET_SRC (set)))
2883 note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (SET_SRC (set)));
2885 if (note)
2887 int regno = REGNO (dest);
2888 rtx x = XEXP (note, 0);
2890 /* If we haven't done so, record for reload that this is an
2891 equivalencing insn. */
2892 if (!reg_equiv[regno].is_arg_equivalence)
2893 reg_equiv_init (regno)
2894 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
2896 /* Record whether or not we created a REG_EQUIV note for a LABEL_REF.
2897 We might end up substituting the LABEL_REF for uses of the
2898 pseudo here or later. That kind of transformation may turn an
2899 indirect jump into a direct jump, in which case we must rerun the
2900 jump optimizer to ensure that the JUMP_LABEL fields are valid. */
2901 if (GET_CODE (x) == LABEL_REF
2902 || (GET_CODE (x) == CONST
2903 && GET_CODE (XEXP (x, 0)) == PLUS
2904 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)))
2905 recorded_label_ref = 1;
2907 reg_equiv[regno].replacement = x;
2908 reg_equiv[regno].src_p = &SET_SRC (set);
2909 reg_equiv[regno].loop_depth = loop_depth;
2911 /* Don't mess with things live during setjmp. */
2912 if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
2914 /* Note that the statement below does not affect the priority
2915 in local-alloc! */
2916 REG_LIVE_LENGTH (regno) *= 2;
2918 /* If the register is referenced exactly twice, meaning it is
2919 set once and used once, indicate that the reference may be
2920 replaced by the equivalence we computed above. Do this
2921 even if the register is only used in one block so that
2922 dependencies can be handled where the last register is
2923 used in a different block (i.e. HIGH / LO_SUM sequences)
2924 and to reduce the number of registers alive across
2925 calls. */
2927 if (REG_N_REFS (regno) == 2
2928 && (rtx_equal_p (x, src)
2929 || ! equiv_init_varies_p (src))
2930 && NONJUMP_INSN_P (insn)
2931 && equiv_init_movable_p (PATTERN (insn), regno))
2932 reg_equiv[regno].replace = 1;
2938 if (!optimize)
2939 goto out;
2941 /* A second pass, to gather additional equivalences with memory. This needs
2942 to be done after we know which registers we are going to replace. */
2944 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2946 rtx set, src, dest;
2947 unsigned regno;
2949 if (! INSN_P (insn))
2950 continue;
2952 set = single_set (insn);
2953 if (! set)
2954 continue;
2956 dest = SET_DEST (set);
2957 src = SET_SRC (set);
2959 /* If this sets a MEM to the contents of a REG that is only used
2960 in a single basic block, see if the register is always equivalent
2961 to that memory location and if moving the store from INSN to the
2962 insn that set REG is safe. If so, put a REG_EQUIV note on the
2963 initializing insn.
2965 Don't add a REG_EQUIV note if the insn already has one. The existing
2966 REG_EQUIV is likely more useful than the one we are adding.
2968 If one of the regs in the address has reg_equiv[REGNO].replace set,
2969 then we can't add this REG_EQUIV note. The reg_equiv[REGNO].replace
2970 optimization may move the set of this register immediately before
2971 insn, which puts it after reg_equiv[REGNO].init_insns, and hence
2972 the mention in the REG_EQUIV note would be to an uninitialized
2973 pseudo. */
2975 if (MEM_P (dest) && REG_P (src)
2976 && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
2977 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2978 && DF_REG_DEF_COUNT (regno) == 1
2979 && reg_equiv[regno].init_insns != 0
2980 && reg_equiv[regno].init_insns != const0_rtx
2981 && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0),
2982 REG_EQUIV, NULL_RTX)
2983 && ! contains_replace_regs (XEXP (dest, 0)))
2985 rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0);
2986 if (validate_equiv_mem (init_insn, src, dest)
2987 && ! memref_used_between_p (dest, init_insn, insn)
2988 /* Attaching a REG_EQUIV note will fail if INIT_INSN has
2989 multiple sets. */
2990 && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
2992 /* This insn makes the equivalence, not the one initializing
2993 the register. */
2994 reg_equiv_init (regno)
2995 = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
2996 df_notes_rescan (init_insn);
3001 cleared_regs = BITMAP_ALLOC (NULL);
/* Now scan all regs killed in an insn to see if any of them are
   registers that are used only once.  If so, see if we can replace the
3004 reference with the equivalent form. If we can, delete the
3005 initializing reference and this register will go away. If we
3006 can't replace the reference, and the initializing reference is
3007 within the same loop (or in an inner loop), then move the register
3008 initialization just before the use, so that they are in the same
3009 basic block. */
3010 FOR_EACH_BB_REVERSE (bb)
3012 loop_depth = bb->loop_depth;
3013 for (insn = BB_END (bb);
3014 insn != PREV_INSN (BB_HEAD (bb));
3015 insn = PREV_INSN (insn))
3017 rtx link;
3019 if (! INSN_P (insn))
3020 continue;
/* Don't substitute into a non-local goto; this confuses the CFG.  */
3023 if (JUMP_P (insn)
3024 && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
3025 continue;
3027 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3029 if (REG_NOTE_KIND (link) == REG_DEAD
3030 /* Make sure this insn still refers to the register. */
3031 && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
3033 int regno = REGNO (XEXP (link, 0));
3034 rtx equiv_insn;
3036 if (! reg_equiv[regno].replace
3037 || reg_equiv[regno].loop_depth < loop_depth
/* It makes no sense to move insns if register
   pressure-sensitive scheduling was done, because
   it will most probably not improve allocation
   but will worsen the insn schedule.  */
3043 || (flag_sched_pressure && flag_schedule_insns))
3044 continue;
3046 /* reg_equiv[REGNO].replace gets set only when
3047 REG_N_REFS[REGNO] is 2, i.e. the register is set
3048 once and used once. (If it were only set, but not used,
3049 flow would have deleted the setting insns.) Hence
3050 there can only be one insn in reg_equiv[REGNO].init_insns. */
3051 gcc_assert (reg_equiv[regno].init_insns
3052 && !XEXP (reg_equiv[regno].init_insns, 1));
3053 equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
3055 /* We may not move instructions that can throw, since
3056 that changes basic block boundaries and we are not
3057 prepared to adjust the CFG to match. */
3058 if (can_throw_internal (equiv_insn))
3059 continue;
3061 if (asm_noperands (PATTERN (equiv_insn)) < 0
3062 && validate_replace_rtx (regno_reg_rtx[regno],
3063 *(reg_equiv[regno].src_p), insn))
3065 rtx equiv_link;
3066 rtx last_link;
3067 rtx note;
3069 /* Find the last note. */
3070 for (last_link = link; XEXP (last_link, 1);
3071 last_link = XEXP (last_link, 1))
3074 /* Append the REG_DEAD notes from equiv_insn. */
3075 equiv_link = REG_NOTES (equiv_insn);
3076 while (equiv_link)
3078 note = equiv_link;
3079 equiv_link = XEXP (equiv_link, 1);
3080 if (REG_NOTE_KIND (note) == REG_DEAD)
3082 remove_note (equiv_insn, note);
3083 XEXP (last_link, 1) = note;
3084 XEXP (note, 1) = NULL_RTX;
3085 last_link = note;
3089 remove_death (regno, insn);
3090 SET_REG_N_REFS (regno, 0);
3091 REG_FREQ (regno) = 0;
3092 delete_insn (equiv_insn);
3094 reg_equiv[regno].init_insns
3095 = XEXP (reg_equiv[regno].init_insns, 1);
3097 reg_equiv_init (regno) = NULL_RTX;
3098 bitmap_set_bit (cleared_regs, regno);
3100 /* Move the initialization of the register to just before
3101 INSN. Update the flow information. */
3102 else if (prev_nondebug_insn (insn) != equiv_insn)
3104 rtx new_insn;
3106 new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
3107 REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
3108 REG_NOTES (equiv_insn) = 0;
3109 /* Rescan it to process the notes. */
3110 df_insn_rescan (new_insn);
3112 /* Make sure this insn is recognized before
3113 reload begins, otherwise
3114 eliminate_regs_in_insn will die. */
3115 INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
3117 delete_insn (equiv_insn);
3119 XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
3121 REG_BASIC_BLOCK (regno) = bb->index;
3122 REG_N_CALLS_CROSSED (regno) = 0;
3123 REG_FREQ_CALLS_CROSSED (regno) = 0;
3124 REG_N_THROWING_CALLS_CROSSED (regno) = 0;
3125 REG_LIVE_LENGTH (regno) = 2;
3127 if (insn == BB_HEAD (bb))
3128 BB_HEAD (bb) = PREV_INSN (insn);
3130 reg_equiv_init (regno)
3131 = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
3132 bitmap_set_bit (cleared_regs, regno);
3139 if (!bitmap_empty_p (cleared_regs))
3141 FOR_EACH_BB (bb)
3143 bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
3144 bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
3145 bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
3146 bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
3149 /* Last pass - adjust debug insns referencing cleared regs. */
3150 if (MAY_HAVE_DEBUG_INSNS)
3151 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3152 if (DEBUG_INSN_P (insn))
3154 rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
3155 INSN_VAR_LOCATION_LOC (insn)
3156 = simplify_replace_fn_rtx (old_loc, NULL_RTX,
3157 adjust_cleared_regs,
3158 (void *) cleared_regs);
3159 if (old_loc != INSN_VAR_LOCATION_LOC (insn))
3160 df_insn_rescan (insn);
3164 BITMAP_FREE (cleared_regs);
3166 out:
3167 /* Clean up. */
3169 end_alias_analysis ();
3170 free (reg_equiv);
3171 return recorded_label_ref;
3176 /* Print chain C to FILE. */
3177 static void
3178 print_insn_chain (FILE *file, struct insn_chain *c)
fprintf (file, "insn=%d, ", INSN_UID (c->insn));
3181 bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
3182 bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
3186 /* Print all reload_insn_chains to FILE. */
3187 static void
3188 print_insn_chains (FILE *file)
3190 struct insn_chain *c;
3191 for (c = reload_insn_chain; c ; c = c->next)
3192 print_insn_chain (file, c);
3195 /* Return true if pseudo REGNO should be added to set live_throughout
3196 or dead_or_set of the insn chains for reload consideration. */
3197 static bool
3198 pseudo_for_reload_consideration_p (int regno)
/* Consider spilled pseudos too for IRA because they still have a
   chance to get hard registers during reload when IRA is used.  */
3202 return (reg_renumber[regno] >= 0 || ira_conflicts_p);
/* Initialize LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM]:
   REG determines the size in bytes, and INIT_VALUE determines whether
   the bits start out as ones or zeros.  ALLOCNUM need not be the
   regno of REG.  */
3208 static void
3209 init_live_subregs (bool init_value, sbitmap *live_subregs,
3210 int *live_subregs_used, int allocnum, rtx reg)
3212 unsigned int regno = REGNO (SUBREG_REG (reg));
3213 int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
3215 gcc_assert (size > 0);
3217 /* Been there, done that. */
3218 if (live_subregs_used[allocnum])
3219 return;
3221 /* Create a new one with zeros. */
3222 if (live_subregs[allocnum] == NULL)
3223 live_subregs[allocnum] = sbitmap_alloc (size);
/* If the entire reg was live before being split into subregs,
   initialize all of the subreg bits to ones; otherwise initialize
   them to zeros.  */
3227 if (init_value)
3228 sbitmap_ones (live_subregs[allocnum]);
3229 else
3230 sbitmap_zero (live_subregs[allocnum]);
3232 /* Set the number of bits that we really want. */
3233 live_subregs_used[allocnum] = size;
3236 /* Walk the insns of the current function and build reload_insn_chain,
3237 and record register life information. */
3238 static void
3239 build_insn_chain (void)
3241 unsigned int i;
3242 struct insn_chain **p = &reload_insn_chain;
3243 basic_block bb;
3244 struct insn_chain *c = NULL;
3245 struct insn_chain *next = NULL;
3246 bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
3247 bitmap elim_regset = BITMAP_ALLOC (NULL);
3248 /* live_subregs is a vector used to keep accurate information about
3249 which hardregs are live in multiword pseudos. live_subregs and
3250 live_subregs_used are indexed by pseudo number. The live_subreg
3251 entry for a particular pseudo is only used if the corresponding
element in live_subregs_used is nonzero.  The value in
live_subregs_used is the number of bytes that the pseudo can
3254 occupy. */
3255 sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
3256 int *live_subregs_used = XNEWVEC (int, max_regno);
3258 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3259 if (TEST_HARD_REG_BIT (eliminable_regset, i))
3260 bitmap_set_bit (elim_regset, i);
3261 FOR_EACH_BB_REVERSE (bb)
3263 bitmap_iterator bi;
3264 rtx insn;
3266 CLEAR_REG_SET (live_relevant_regs);
3267 memset (live_subregs_used, 0, max_regno * sizeof (int));
3269 EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb), 0, i, bi)
3271 if (i >= FIRST_PSEUDO_REGISTER)
3272 break;
3273 bitmap_set_bit (live_relevant_regs, i);
3276 EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb),
3277 FIRST_PSEUDO_REGISTER, i, bi)
3279 if (pseudo_for_reload_consideration_p (i))
3280 bitmap_set_bit (live_relevant_regs, i);
3283 FOR_BB_INSNS_REVERSE (bb, insn)
3285 if (!NOTE_P (insn) && !BARRIER_P (insn))
3287 unsigned int uid = INSN_UID (insn);
3288 df_ref *def_rec;
3289 df_ref *use_rec;
3291 c = new_insn_chain ();
3292 c->next = next;
3293 next = c;
3294 *p = c;
3295 p = &c->prev;
3297 c->insn = insn;
3298 c->block = bb->index;
3300 if (INSN_P (insn))
3301 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
3303 df_ref def = *def_rec;
3304 unsigned int regno = DF_REF_REGNO (def);
/* Ignore may-clobbers because these are generated
3307 from calls. However, every other kind of def is
3308 added to dead_or_set. */
3309 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
3311 if (regno < FIRST_PSEUDO_REGISTER)
3313 if (!fixed_regs[regno])
3314 bitmap_set_bit (&c->dead_or_set, regno);
3316 else if (pseudo_for_reload_consideration_p (regno))
3317 bitmap_set_bit (&c->dead_or_set, regno);
3320 if ((regno < FIRST_PSEUDO_REGISTER
3321 || reg_renumber[regno] >= 0
3322 || ira_conflicts_p)
3323 && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
3325 rtx reg = DF_REF_REG (def);
3327 /* We can model subregs, but not if they are
3328 wrapped in ZERO_EXTRACTS. */
3329 if (GET_CODE (reg) == SUBREG
3330 && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
3332 unsigned int start = SUBREG_BYTE (reg);
3333 unsigned int last = start
3334 + GET_MODE_SIZE (GET_MODE (reg));
3336 init_live_subregs
3337 (bitmap_bit_p (live_relevant_regs, regno),
3338 live_subregs, live_subregs_used, regno, reg);
3340 if (!DF_REF_FLAGS_IS_SET
3341 (def, DF_REF_STRICT_LOW_PART))
3343 /* Expand the range to cover entire words.
3344 Bytes added here are "don't care". */
3345 start
3346 = start / UNITS_PER_WORD * UNITS_PER_WORD;
3347 last = ((last + UNITS_PER_WORD - 1)
3348 / UNITS_PER_WORD * UNITS_PER_WORD);
3351 /* Ignore the paradoxical bits. */
3352 if ((int)last > live_subregs_used[regno])
3353 last = live_subregs_used[regno];
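/* The def kills these bytes: clear them in the live subreg map.  */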
3355 while (start < last)
3357 RESET_BIT (live_subregs[regno], start);
3358 start++;
3361 if (sbitmap_empty_p (live_subregs[regno]))
3363 live_subregs_used[regno] = 0;
3364 bitmap_clear_bit (live_relevant_regs, regno);
3366 else
3367 /* Set live_relevant_regs here because
3368 that bit has to be true to get us to
3369 look at the live_subregs fields. */
3370 bitmap_set_bit (live_relevant_regs, regno);
3372 else
3374 /* DF_REF_PARTIAL is generated for
3375 subregs, STRICT_LOW_PART, and
3376 ZERO_EXTRACT. We handle the subreg
3377 case above so here we have to keep from
3378 modeling the def as a killing def. */
3379 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
3381 bitmap_clear_bit (live_relevant_regs, regno);
3382 live_subregs_used[regno] = 0;
3388 bitmap_and_compl_into (live_relevant_regs, elim_regset);
3389 bitmap_copy (&c->live_throughout, live_relevant_regs);
3391 if (INSN_P (insn))
3392 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
3394 df_ref use = *use_rec;
3395 unsigned int regno = DF_REF_REGNO (use);
3396 rtx reg = DF_REF_REG (use);
3398 /* DF_REF_READ_WRITE on a use means that this use
3399 is fabricated from a def that is a partial set
3400 to a multiword reg. Here, we only model the
3401 subreg case that is not wrapped in ZERO_EXTRACT
3402 precisely so we do not need to look at the
3403 fabricated use. */
3404 if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
3405 && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
3406 && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
3407 continue;
3409 /* Add the last use of each var to dead_or_set. */
3410 if (!bitmap_bit_p (live_relevant_regs, regno))
3412 if (regno < FIRST_PSEUDO_REGISTER)
3414 if (!fixed_regs[regno])
3415 bitmap_set_bit (&c->dead_or_set, regno);
3417 else if (pseudo_for_reload_consideration_p (regno))
3418 bitmap_set_bit (&c->dead_or_set, regno);
3421 if (regno < FIRST_PSEUDO_REGISTER
3422 || pseudo_for_reload_consideration_p (regno))
3424 if (GET_CODE (reg) == SUBREG
3425 && !DF_REF_FLAGS_IS_SET (use,
3426 DF_REF_SIGN_EXTRACT
3427 | DF_REF_ZERO_EXTRACT))
3429 unsigned int start = SUBREG_BYTE (reg);
3430 unsigned int last = start
3431 + GET_MODE_SIZE (GET_MODE (reg));
3433 init_live_subregs
3434 (bitmap_bit_p (live_relevant_regs, regno),
3435 live_subregs, live_subregs_used, regno, reg);
3437 /* Ignore the paradoxical bits. */
3438 if ((int)last > live_subregs_used[regno])
3439 last = live_subregs_used[regno];
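/* The use makes these bytes live.  */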
3441 while (start < last)
3443 SET_BIT (live_subregs[regno], start);
3444 start++;
3447 else
/* Resetting live_subregs_used effectively says: do not
   use the subreg info, because we are reading the
   whole pseudo.  */
3452 live_subregs_used[regno] = 0;
3453 bitmap_set_bit (live_relevant_regs, regno);
3459 /* FIXME!! The following code is a disaster. Reload needs to see the
3460 labels and jump tables that are just hanging out in between
3461 the basic blocks. See pr33676. */
3462 insn = BB_HEAD (bb);
3464 /* Skip over the barriers and cruft. */
3465 while (insn && (BARRIER_P (insn) || NOTE_P (insn)
3466 || BLOCK_FOR_INSN (insn) == bb))
3467 insn = PREV_INSN (insn);
/* Although we add anything except barriers and notes, the
   goal is to get the labels and jump tables into the
   reload_insn_chain.  */
3472 while (insn)
3474 if (!NOTE_P (insn) && !BARRIER_P (insn))
3476 if (BLOCK_FOR_INSN (insn))
3477 break;
3479 c = new_insn_chain ();
3480 c->next = next;
3481 next = c;
3482 *p = c;
3483 p = &c->prev;
3485 /* The block makes no sense here, but it is what the old
3486 code did. */
3487 c->block = bb->index;
3488 c->insn = insn;
3489 bitmap_copy (&c->live_throughout, live_relevant_regs);
3491 insn = PREV_INSN (insn);

  for (i = 0; i < (unsigned int) max_regno; i++)
    free (live_subregs[i]);

  reload_insn_chain = c;
  *p = NULL;

  free (live_subregs);
  free (live_subregs_used);
  BITMAP_FREE (live_relevant_regs);
  BITMAP_FREE (elim_regset);

  if (dump_file)
    print_insn_chains (dump_file);
}

/* All natural loops.  */
struct loops ira_loops;

/* True if we have allocno conflicts.  It is false for non-optimized
   mode or when the conflict table is too big.  */
bool ira_conflicts_p;

/* This is the main entry of IRA.  */
static void
ira (FILE *f)
{
  int overall_cost_before, allocated_reg_info_size;
  bool loops_p;
  int max_regno_before_ira, ira_max_point_before_emit;
  int rebuild_p;
  int saved_flag_ira_share_spill_slots;
  basic_block bb;

  timevar_push (TV_IRA);

  if (flag_caller_saves)
    init_caller_save ();

  if (flag_ira_verbose < 10)
    {
      internal_flag_ira_verbose = flag_ira_verbose;
      ira_dump_file = f;
    }
  else
    {
      internal_flag_ira_verbose = flag_ira_verbose - 10;
      ira_dump_file = stderr;
    }

  ira_conflicts_p = optimize > 0;
  setup_prohibited_mode_move_regs ();

  df_note_add_problem ();

  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }
#ifdef ENABLE_CHECKING
  df->changeable_flags |= DF_VERIFY_SCHEDULED;
#endif
  df_analyze ();
  df_clear_flags (DF_NO_INSN_RESCAN);
  regstat_init_n_sets_and_refs ();
  regstat_compute_ri ();

  /* If we are not optimizing, then this is the only place before
     register allocation where dataflow is done.  And that is needed
     to generate these warnings.  */
  if (warn_clobbered)
    generate_setjmp_warnings ();

  /* Determine if the current function is a leaf before running IRA
     since this can impact optimizations done by the prologue and
     epilogue, thus changing register elimination offsets.  */
  current_function_is_leaf = leaf_function_p ();

  if (resize_reg_info () && flag_ira_loop_pressure)
    ira_set_pseudo_classes (ira_dump_file);

  rebuild_p = update_equiv_regs ();

#ifndef IRA_NO_OBSTACK
  gcc_obstack_init (&ira_obstack);
#endif
  bitmap_obstack_initialize (&ira_bitmap_obstack);
  if (optimize)
    {
      max_regno = max_reg_num ();
      ira_reg_equiv_len = max_regno;
      ira_reg_equiv_invariant_p
        = (bool *) ira_allocate (max_regno * sizeof (bool));
      memset (ira_reg_equiv_invariant_p, 0, max_regno * sizeof (bool));
      ira_reg_equiv_const = (rtx *) ira_allocate (max_regno * sizeof (rtx));
      memset (ira_reg_equiv_const, 0, max_regno * sizeof (rtx));
      find_reg_equiv_invariant_const ();
      if (rebuild_p)
        {
          timevar_push (TV_JUMP);
          rebuild_jump_labels (get_insns ());
          if (purge_all_dead_edges ())
            delete_unreachable_blocks ();
          timevar_pop (TV_JUMP);
        }
    }

  max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
  ira_setup_eliminable_regset ();
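
  /* The call above computes, roughly, the set of hard registers that
     must not be handed out by the allocator, e.g. the frame pointer
     when it cannot be eliminated.  */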

  ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
  ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
  ira_move_loops_num = ira_additional_jumps_num = 0;

  ira_assert (current_loops == NULL);
  flow_loops_find (&ira_loops);
  record_loop_exits ();
  current_loops = &ira_loops;

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "Building IRA IR\n");
  loops_p = ira_build (optimize
                       && (flag_ira_region == IRA_REGION_ALL
                           || flag_ira_region == IRA_REGION_MIXED));
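
  /* Loop regions are built only when optimizing and when -fira-region
     allows them; loops_p records whether any region besides the root
     was actually created.  */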

  ira_assert (ira_conflicts_p || !loops_p);

  saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
  if (too_high_register_pressure_p () || cfun->calls_setjmp)
    /* Packing spilled pseudos into stack slots in this case just
       wastes compile time -- prohibit it.  We also do this if there
       is a setjmp call: the compiler is required to preserve the
       value of a variable not modified between the setjmp and the
       longjmp, and sharing stack slots does not guarantee that.  */
    flag_ira_share_spill_slots = FALSE;

  ira_color ();

  ira_max_point_before_emit = ira_max_point;

  ira_initiate_emit_data ();

  ira_emit (loops_p);
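
  /* ira_emit (see ira-emit.c) generates move insns on the borders of
     regions for pseudos whose allocnos were assigned different
     locations in different regions.  */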

  if (ira_conflicts_p)
    {
      max_regno = max_reg_num ();

      if (! loops_p)
        ira_initiate_assign ();
      else
        {
          expand_reg_info (allocated_reg_info_size);
          setup_preferred_alternate_classes_for_new_pseudos
            (allocated_reg_info_size);
          allocated_reg_info_size = max_regno;

          if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "Flattening IR\n");
          ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
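
          /* ira_flattening rebuilds the IR as if there were a single
             region, merging each pseudo's per-region allocnos, so the
             reassignment below sees one allocno per pseudo.  */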

          /* New insns were generated: add notes and recalculate live
             info.  */
          df_analyze ();

          flow_loops_find (&ira_loops);
          record_loop_exits ();
          current_loops = &ira_loops;

          setup_allocno_assignment_flags ();
          ira_initiate_assign ();
          ira_reassign_conflict_allocnos (max_regno);
        }
    }

  ira_finish_emit_data ();

  setup_reg_renumber ();

  calculate_allocation_cost ();

#ifdef ENABLE_IRA_CHECKING
  if (ira_conflicts_p)
    check_allocation ();
#endif

  if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
    df_analyze ();

  if (max_regno != max_regno_before_ira)
    {
      regstat_free_n_sets_and_refs ();
      regstat_free_ri ();
      regstat_init_n_sets_and_refs ();
      regstat_compute_ri ();
    }

  overall_cost_before = ira_overall_cost;
  if (! ira_conflicts_p)
    grow_reg_equivs ();
  else
    {
      fix_reg_equiv_init ();
#ifdef ENABLE_IRA_CHECKING
      print_redundant_copies ();
#endif
    }

  ira_spilled_reg_stack_slots_num = 0;
  ira_spilled_reg_stack_slots
    = ((struct ira_spilled_reg_stack_slot *)
       ira_allocate (max_regno
                     * sizeof (struct ira_spilled_reg_stack_slot)));
  memset (ira_spilled_reg_stack_slots, 0,
          max_regno * sizeof (struct ira_spilled_reg_stack_slot));

  allocate_initial_values (reg_equivs);

  timevar_pop (TV_IRA);

  timevar_push (TV_RELOAD);
  df_set_flags (DF_NO_INSN_RESCAN);
  build_insn_chain ();

  reload_completed = !reload (get_insns (), ira_conflicts_p);
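
  /* reload returns nonzero if it failed, in which case
     reload_completed is left clear; normally it becomes true here and
     stays true for the rest of the compilation of this function.  */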

  timevar_pop (TV_RELOAD);

  timevar_push (TV_IRA);

  if (ira_conflicts_p)
    {
      ira_free (ira_spilled_reg_stack_slots);
      ira_finish_assign ();
    }

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
      && overall_cost_before != ira_overall_cost)
    fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
  ira_destroy ();

  flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;

  flow_loops_free (&ira_loops);
  free_dominance_info (CDI_DOMINATORS);
  FOR_ALL_BB (bb)
    bb->loop_father = NULL;
  current_loops = NULL;

  regstat_free_ri ();
  regstat_free_n_sets_and_refs ();

  if (optimize)
    {
      cleanup_cfg (CLEANUP_EXPENSIVE);

      ira_free (ira_reg_equiv_invariant_p);
      ira_free (ira_reg_equiv_const);
    }

  bitmap_obstack_release (&ira_bitmap_obstack);
#ifndef IRA_NO_OBSTACK
  obstack_free (&ira_obstack, NULL);
#endif

  /* The code after the reload has changed so much that at this point
     we might as well just rescan everything.  Note that
     df_rescan_all_insns is not going to help here because it does not
     touch the artificial uses and defs.  */
  df_finish_pass (true);
  if (optimize > 1)
    df_live_add_problem ();
  df_scan_alloc (NULL);
  df_scan_blocks ();

  if (optimize)
    df_analyze ();

  timevar_pop (TV_IRA);
}
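
/* IRA has no enabling flag; the pass always runs.  */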
static bool
gate_ira (void)
{
  return true;
}

/* Run the integrated register allocator.  */
static unsigned int
rest_of_handle_ira (void)
{
  ira (dump_file);
  return 0;
}

struct rtl_opt_pass pass_ira =
{
 {
  RTL_PASS,
  "ira",                                /* name */
  gate_ira,                             /* gate */
  rest_of_handle_ira,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect                      /* todo_flags_finish */
 }
};