/* Integrated Register Allocator (IRA) entry point.
   Copyright (C) 2006-2021 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The integrated register allocator (IRA) is a
   regional register allocator performing graph coloring on a top-down
   traversal of nested regions.  Graph coloring in a region is based
   on the Chaitin-Briggs algorithm.  It is called integrated because
   register coalescing, register live range splitting, and choosing a
   better hard register are done on-the-fly during coloring.  Register
   coalescing and choosing a cheaper hard register are done by hard
   register preferencing during hard register assignment.  The live
   range splitting is a byproduct of the regional register allocation.

   Major IRA notions are:

     o *Region* is a part of a CFG where graph coloring based on the
       Chaitin-Briggs algorithm is done.  IRA can work on any set of
       nested CFG regions forming a tree.  Currently the regions are
       the entire function for the root region and natural loops for
       the other regions.  Therefore the data structure representing a
       region is called loop_tree_node.
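
       As a purely illustrative example (hypothetical code, not taken
       from the compiler sources), a function of the form

         void f (int n)
         {
           for (int i = 0; i < n; i++)      // loop 1
             for (int j = 0; j < n; j++)    // loop 2, nested in loop 1
               do_something (i, j);         // hypothetical callee
         }

       gives the region tree

         root region (the whole function f)
           loop_tree_node for loop 1
             loop_tree_node for loop 2

       i.e. the root loop_tree_node represents the function itself and
       each natural loop gets a child node.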

     o *Allocno class* is a register class used for allocation of a
       given allocno.  It means that only a hard register of the given
       register class can be assigned to the given allocno.  In
       reality, an even smaller subset of (*profitable*) hard registers
       can be assigned.  In rare cases, the subset can be even smaller
       because our modification of the Chaitin-Briggs algorithm
       requires that the sets of hard registers that can be assigned to
       allocnos form a forest, i.e. the sets can be ordered in a way
       where any previous set is not intersected with a given set or is
       a superset of it.

     o *Pressure class* is a register class belonging to a set of
       register classes containing all of the hard-registers available
       for register allocation.  The set of all pressure classes for a
       target is defined in the corresponding machine-description file
       according to some criteria.  Register pressure is calculated
       only for pressure classes and it affects some IRA decisions such
       as forming allocation regions.

     o *Allocno* represents the live range of a pseudo-register in a
       region.  Besides the obvious attributes like the corresponding
       pseudo-register number, allocno class, conflicting allocnos and
       conflicting hard-registers, there are a few allocno attributes
       which are important for understanding the allocation algorithm:

       - *Live ranges*.  This is a list of ranges of *program points*
         where the allocno lives.  Program points represent places
         where a pseudo can be born or become dead (there are
         approximately two times more program points than insns)
         and they are represented by integers starting with 0.  The
         live ranges are used to find conflicts between allocnos.
         They also play a very important role in the transformation of
         the IRA internal representation of several regions into a one
         region representation.  The latter is used during the reload
         pass because each allocno there represents all of the
         corresponding pseudo-registers.
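
         As a minimal sketch of how such program-point ranges are used
         for conflict detection (toy types only, not IRA's actual live
         range representation), two allocnos conflict when some of
         their ranges overlap:

           struct toy_range { int start, finish; struct toy_range *next; };

           // Toy check: do two range lists, each sorted by increasing
           // start point, share a program point?
           static bool
           toy_ranges_intersect_p (struct toy_range *r1, struct toy_range *r2)
           {
             while (r1 != NULL && r2 != NULL)
               {
                 if (r1->finish < r2->start)
                   r1 = r1->next;
                 else if (r2->finish < r1->start)
                   r2 = r2->next;
                 else
                   return true;   // the [start, finish] intervals overlap
               }
             return false;
           }

         The real test in ira-lives.c follows the same principle on
         IRA's own live range objects.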

       - *Hard-register costs*.  This is a vector of size equal to the
         number of available hard-registers of the allocno class.  The
         cost of a callee-clobbered hard-register for an allocno is
         increased by the cost of save/restore code around the calls
         through the given allocno's life.  If the allocno is a move
         instruction operand and another operand is a hard-register of
         the allocno class, the cost of the hard-register is decreased
         by the move cost.

         When an allocno is assigned, the hard-register with minimal
         full cost is used.  Initially, a hard-register's full cost is
         the corresponding value from the hard-register's cost vector.
         If the allocno is connected by a *copy* (see below) to
         another allocno which has just received a hard-register, the
         cost of the hard-register is decreased.  Before choosing a
         hard-register for an allocno, the allocno's current costs of
         the hard-registers are modified by the conflict hard-register
         costs of all of the conflicting allocnos which are not
         assigned yet.

       - *Conflict hard-register costs*.  This is a vector of the same
         size as the hard-register costs vector.  To permit an
         unassigned allocno to get a better hard-register, IRA uses
         this vector to calculate the final full cost of the
         available hard-registers.  Conflict hard-register costs of an
         unassigned allocno are also changed with a change of the
         hard-register cost of the allocno when a copy involving the
         allocno is processed as described above.  This is done to
         show other unassigned allocnos that a given allocno prefers
         some hard-registers in order to remove the move instruction
         corresponding to the copy.
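
         As a simplified sketch of the "full cost" idea (hypothetical
         toy types, not IRA's allocno representation), the cost used
         when finally picking a hard register combines the allocno's
         own cost with the conflict costs of the still-unassigned
         conflicting allocnos:

           struct toy_allocno
           {
             int *hard_reg_cost;             // indexed by class hard reg
             int *conflict_hard_reg_cost;    // same indexing
             int conflicts_num;
             struct toy_allocno **conflicts;
             bool assigned_p;
           };

           // Toy full cost of hard register HREG for allocno A.
           static int
           toy_full_cost (const struct toy_allocno *a, int hreg)
           {
             int cost = a->hard_reg_cost[hreg];
             for (int i = 0; i < a->conflicts_num; i++)
               if (! a->conflicts[i]->assigned_p)
                 cost += a->conflicts[i]->conflict_hard_reg_cost[hreg];
             return cost;
           }

         The hard register with the smallest such cost among the
         profitable, non-conflicting ones is the assignment candidate.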

     o *Cap*.  If a pseudo-register does not live in a region but
       lives in a nested region, IRA creates a special allocno called
       a cap in the outer region.  A region cap is also created for a
       subregion cap.

     o *Copy*.  Allocnos can be connected by copies.  Copies are used
       to modify hard-register costs for allocnos during coloring.
       Such modifications reflect a preference to use the same
       hard-register for the allocnos connected by copies.  Usually
       copies are created for move insns (in this case it results in
       register coalescing).  But IRA also creates copies for operands
       of an insn which should be assigned to the same hard-register
       due to constraints in the machine description (it usually
       results in removing a move generated in reload to satisfy
       the constraints) and copies referring to the allocno which is
       the output operand of an instruction and the allocno which is
       an input operand dying in the instruction (creation of such
       copies results in less register shuffling).  IRA *does not*
       create copies between allocnos for the same register from
       different regions because we use another technique for
       propagating hard-register preference on the borders of regions.
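
       For instance (a purely illustrative example, pseudo numbers and
       hard register chosen arbitrarily), for the move insn

         (set (reg:SI 130) (reg:SI 129))

       IRA creates a copy connecting the allocnos of pseudos 129 and
       130.  If one of them later receives hard register %edx, the cost
       of %edx for the other one is decreased, so both tend to end up
       in %edx and the move becomes removable.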

   Allocnos (including caps) for the upper region in the region tree
   *accumulate* information important for coloring from allocnos with
   the same pseudo-register from nested regions.  This includes
   hard-register and memory costs, conflicts with hard-registers,
   allocno conflicts, allocno copies and more.  *Thus, attributes for
   allocnos in a region have the same values as if the region had no
   subregions*.  It means that attributes for allocnos in the
   outermost region corresponding to the function have the same values
   as though the allocation used only one region which is the entire
   function.  It also means that we can look at IRA's work as if it
   first did the allocation for the whole function, then improved the
   allocation for loops, then their subloops, and so on.

   IRA major passes are:

     o Building the IRA internal representation, which consists of the
       following subpasses:

       * First, IRA builds regions and creates allocnos (file
         ira-build.c) and initializes most of their attributes.

       * Then IRA finds an allocno class for each allocno and
         calculates its initial (non-accumulated) cost of memory and of
         each hard-register of its allocno class (file ira-costs.c).

       * IRA creates live ranges of each allocno, calculates register
         pressure for each pressure class in each region, sets up
         conflict hard registers for each allocno and info about calls
         the allocno lives through (file ira-lives.c).

       * IRA removes low register pressure loops from the regions,
         mostly to speed IRA up (file ira-build.c).

       * IRA propagates accumulated allocno info from lower region
         allocnos to corresponding upper region allocnos (file
         ira-build.c).

       * IRA creates all caps (file ira-build.c).

       * Having live ranges of allocnos and their classes, IRA creates
         conflicting allocnos for each allocno.  Conflicting allocnos
         are stored as a bit vector or an array of pointers to the
         conflicting allocnos, whichever is more profitable (file
         ira-conflicts.c).  At this point IRA creates allocno copies.

     o Coloring.  Now IRA has all the necessary info to start the graph
       coloring process.  It is done in each region on a top-down
       traversal of the region tree (file ira-color.c).  There are the
       following subpasses:

       * Finding profitable hard registers of the corresponding allocno
         class for each allocno.  For example, only callee-saved hard
         registers are frequently profitable for allocnos living
         through calls.  If the profitable hard register set of an
         allocno does not form a tree based on the subset relation, we
         use some approximation to form the tree.  This approximation
         is used to figure out trivial colorability of allocnos.  The
         approximation is needed only in pretty rare cases.

       * Putting allocnos onto the coloring stack.  IRA uses Briggs
         optimistic coloring, which is a major improvement over
         Chaitin's coloring.  Therefore IRA does not spill allocnos at
         this point.  There is some freedom in the order of putting
         allocnos on the stack, which can affect the final result of
         the allocation.  IRA uses some heuristics to improve the
         order.  The major one is to form *threads* from colorable
         allocnos and push them on the stack by threads.  A thread is a
         set of non-conflicting colorable allocnos connected by
         copies.  The thread contains allocnos from the colorable
         bucket or colorable allocnos already pushed onto the coloring
         stack.  Pushing thread allocnos one after another onto the
         stack increases the chances of removing copies when the
         allocnos get the same hard reg.

         We also use a modification of the Chaitin-Briggs algorithm
         which works for intersected register classes of allocnos.  To
         figure out trivial colorability of allocnos, the
         above-mentioned tree of hard register sets is used.  To get an
         idea of how the algorithm works, consider an i386 example: an
         allocno to which any general hard register can be assigned.
         If the allocno conflicts with eight allocnos to which only the
         EAX register can be assigned, the given allocno is still
         trivially colorable because all conflicting allocnos might be
         assigned only to EAX and all other general hard registers are
         still free.
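
         As a rough sketch only (the real criterion in ira-color.c is
         more refined because it works on the tree of profitable hard
         register sets and on multi-register allocnos), the idea can be
         expressed as a generalized degree test in which conflicting
         allocnos are grouped by the hard register set they are
         confined to:

           // Toy test: a group confined to a set of GROUP_SIZE hard
           // registers can occupy at most GROUP_SIZE of them, no
           // matter how many allocnos the group contains.
           static bool
           toy_trivially_colorable_p (int available_in_class,
                                      const int *group_size,
                                      const int *group_allocnos_num,
                                      int groups_num)
           {
             int worst_case_used = 0;
             for (int g = 0; g < groups_num; g++)
               worst_case_used += (group_allocnos_num[g] < group_size[g]
                                   ? group_allocnos_num[g] : group_size[g]);
             return worst_case_used < available_in_class;
           }

         In the EAX example above, the eight conflicting allocnos form
         one group of size one, so at most one general register can be
         taken away from the given allocno although its plain conflict
         degree is eight.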

         To get an idea of the trivial colorability criterion used, it
         is also useful to read the article "Graph-Coloring Register
         Allocation for Irregular Architectures" by Michael D. Smith
         and Glenn Holloway.  The major difference between the
         article's approach and the approach used in IRA is that
         Smith's approach takes register classes only from the machine
         description while IRA calculates register classes from the
         intermediate code too
         (e.g. an explicit usage of hard registers in RTL code for
         parameter passing can result in creation of additional
         register classes which contain or exclude the hard
         registers).  That makes the IRA approach useful for improving
         coloring even for architectures with regular register files,
         and in fact some benchmarking shows the improvement for
         regular class architectures is even bigger than for irregular
         ones.  Another difference is that Smith's approach chooses the
         intersection of classes of all insn operands in which a given
         pseudo occurs.  IRA can use bigger classes if it is still
         more profitable than memory usage.

       * Popping the allocnos from the stack and assigning them hard
         registers.  If IRA cannot assign a hard register to an
         allocno and the allocno is coalesced, IRA undoes the
         coalescing and puts the uncoalesced allocnos onto the stack in
         the hope that some such allocnos will get a hard register
         separately.  If IRA fails to assign a hard register or memory
         is more profitable for the allocno, IRA spills the allocno.
         IRA assigns the allocno the hard-register with minimal full
         allocation cost, which reflects the cost of usage of the
         hard-register for the allocno and the cost of usage of the
         hard-register for the allocnos conflicting with the given
         allocno.

       * Chaitin-Briggs coloring assigns as many pseudos as possible
         to hard registers.  After coloring we try to improve the
         allocation from a cost point of view.  We improve the
         allocation by spilling some allocnos and assigning the freed
         hard registers to other allocnos if it decreases the overall
         allocation cost.

       * After allocno assignment in the region, IRA modifies the hard
         register and memory costs for the corresponding allocnos in
         the subregions to reflect the cost of possible loads, stores,
         or moves on the border of the region and its subregions.
         When the default regional allocation algorithm is used
         (-fira-algorithm=mixed), IRA just propagates the assignment
         for allocnos if the register pressure in the region for the
         corresponding pressure class is less than the number of
         available hard registers for the given pressure class.
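
         Stated as a condition only (a hypothetical sketch, not the
         actual IRA code), the propagation test for a pressure class
         in a region is roughly:

           // Toy predicate: reuse the parent's assignment inside the
           // region when the region is not under register pressure
           // for the pressure class.
           static bool
           toy_propagate_assignment_p (int reg_pressure_in_region,
                                       int available_hard_regs_in_pclass)
           {
             return reg_pressure_in_region < available_hard_regs_in_pclass;
           }

         Otherwise the subregion allocno is colored on its own, with
         its costs biased by the border load, store, and move costs
         mentioned above.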

     o Spill/restore code moving.  When IRA performs an allocation
       by traversing regions in top-down order, it does not know what
       happens below in the region tree.  Therefore, sometimes IRA
       misses opportunities to perform a better allocation.  A simple
       optimization tries to improve allocation in a region having
       subregions and contained in another region.  If the
       corresponding allocnos in the subregion are spilled, it spills
       the region allocno if it is profitable.  The optimization
       implements a simple iterative algorithm performing profitable
       transformations while they are still possible.  It is fast in
       practice, so there is no real need for a better time complexity
       algorithm.

     o Code change.  After coloring, two allocnos representing the
       same pseudo-register outside and inside a region respectively
       may be assigned to different locations (hard-registers or
       memory).  In this case IRA creates and uses a new
       pseudo-register inside the region and adds code to move allocno
       values on the region's borders.  This is done during a top-down
       traversal of the regions (file ira-emit.c).  In some
       complicated cases IRA can create a new allocno to move allocno
       values (e.g. when a swap of values stored in two hard-registers
       is needed).  At this stage, the new allocno is marked as
       spilled.  IRA still creates the pseudo-register and the moves
       on the region borders even when both allocnos were assigned to
       the same hard-register.  If the reload pass spills a
       pseudo-register for some reason, the effect will be smaller
       because another allocno will still be in the hard-register.  In
       most cases, this is better than spilling both allocnos.  If
       reload does not change the allocation for the two
       pseudo-registers, the trivial move will be removed by
       post-reload optimizations.  IRA does not generate moves for
       allocnos assigned to the same hard register when the default
       regional allocation algorithm is used and the register pressure
       in the region for the corresponding pressure class is less than
       the number of available hard registers for the given pressure
       class.  IRA also does some optimizations to remove redundant
       stores and to reduce code duplication on the region borders.

     o Flattening internal representation.  After changing code, IRA
       transforms its internal representation for several regions into
       a one region representation (file ira-build.c).  This process is
       called IR flattening.  Such a process is more complicated than
       IR rebuilding would be, but is much faster.

     o After IR flattening, IRA tries to assign hard registers to all
       spilled allocnos.  This is implemented by a simple and fast
       priority coloring algorithm (see function
       ira_reassign_conflict_allocnos in file ira-color.c).  Here new
       allocnos created during the code change pass can be assigned to
       hard registers.

     o At the end IRA calls the reload pass.  The reload pass
       communicates with IRA through several functions in file
       ira-color.c to improve its decisions in

       * sharing stack slots for the spilled pseudos based on IRA info
         about pseudo-register conflicts.

       * reassigning hard-registers to all spilled pseudos at the end
         of each reload iteration.

       * choosing a better hard-register to spill based on IRA info
         about pseudo-register live ranges and the register pressure
         in places where the pseudo-register lives.

   IRA uses a lot of data representing the target processors.  These
   data are initialized in file ira.c.

   If the function has no loops (or the loops are ignored when
   -fira-algorithm=CB is used), we have classic Chaitin-Briggs
   coloring (only instead of a separate pass of coalescing, we use hard
   register preferencing).  In such a case, IRA works much faster
   because many things are not done (like IR flattening, the
   spill/restore optimization, and the code change).

   Literature worth reading for a better understanding of the code:

     o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements to
       Graph Coloring Register Allocation.

     o David Callahan, Brian Koblenz.  Register allocation via
       hierarchical graph coloring.

     o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt.  Revisiting
       Graph Coloring Register Allocation: A Study of the
       Chaitin-Briggs and Callahan-Koblenz Algorithms.

     o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai.
       Global Register Allocation Based on Graph Fusion.

     o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
       Allocation for Irregular Architectures.

     o Vladimir Makarov.  The Integrated Register Allocator for GCC.

     o Vladimir Makarov.  The top-down register allocator for irregular
       register file architectures.  */

#include "coretypes.h"
#include "memmodel.h"
#include "insn-config.h"
#include "diagnostic-core.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "tree-pass.h"
#include "rtl-iter.h"
#include "shrink-wrap.h"
#include "print-rtl.h"

struct target_ira default_target_ira;
class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
class target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif

/* A modified value of flag `-fira-verbose' used internally.  */
int internal_flag_ira_verbose;

/* Dump file of the allocator if it is not NULL.  */
FILE *ira_dump_file;

/* The number of elements in the following array.  */
int ira_spilled_reg_stack_slots_num;

/* The following array contains info about spilled pseudo-registers
   stack slots used in current function so far.  */
class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;

/* Correspondingly overall cost of the allocation, overall cost before
   reload, cost of the allocnos assigned to hard-registers, cost of
   the allocnos assigned to memory, cost of loads, stores and register
   move insns generated for pseudo-register live range splitting (see
   ira-emit.c).  */
int64_t ira_overall_cost, overall_cost_before;
int64_t ira_reg_cost, ira_mem_cost;
int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
int ira_move_loops_num, ira_additional_jumps_num;

/* All registers that can be eliminated.  */
HARD_REG_SET eliminable_regset;

/* Value of max_reg_num () before IRA work start.  This value helps
   us to recognize a situation when new pseudos were created during
   IRA work.  */
static int max_regno_before_ira;

/* Temporary hard reg set used for a different calculation.  */
static HARD_REG_SET temp_hard_regset;

#define last_mode_for_init_move_cost \
  (this_target_ira_int->x_last_mode_for_init_move_cost)

/* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
static void
setup_reg_mode_hard_regset (void)
{
  int i, m, hard_regno;

  for (m = 0; m < NUM_MACHINE_MODES; m++)
    for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
      {
        CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
        for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1;
             i >= 0; i--)
          if (hard_regno + i < FIRST_PSEUDO_REGISTER)
            SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
                              hard_regno + i);
      }
}

#define no_unit_alloc_regs \
  (this_target_ira_int->x_no_unit_alloc_regs)

/* The function sets up the three arrays declared above.  */
static void
setup_class_hard_regs (void)
{
  int cl, i, hard_regno, n;
  HARD_REG_SET processed_hard_reg_set;

  ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
      CLEAR_HARD_REG_SET (processed_hard_reg_set);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
          ira_non_ordered_class_hard_regs[cl][i] = -1;
          ira_class_hard_reg_index[cl][i] = -1;
        }
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
#ifdef REG_ALLOC_ORDER
          hard_regno = reg_alloc_order[i];
#else
          hard_regno = i;
#endif
          if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
            continue;
          SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
          if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
            ira_class_hard_reg_index[cl][hard_regno] = -1;
          else
            {
              ira_class_hard_reg_index[cl][hard_regno] = n;
              ira_class_hard_regs[cl][n++] = hard_regno;
            }
        }
      ira_class_hard_regs_num[cl] = n;
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (temp_hard_regset, i))
          ira_non_ordered_class_hard_regs[cl][n++] = i;
      ira_assert (ira_class_hard_regs_num[cl] == n);
    }
}

/* Set up global variables defining info about hard registers for the
   allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
   that we can use the hard frame pointer for the allocation.  */
static void
setup_alloc_regs (bool use_hard_frame_p)
{
#ifdef ADJUST_REG_ALLOC_ORDER
  ADJUST_REG_ALLOC_ORDER;
#endif
  no_unit_alloc_regs = fixed_nonglobal_reg_set;
  if (! use_hard_frame_p)
    add_to_hard_reg_set (&no_unit_alloc_regs, Pmode,
                         HARD_FRAME_POINTER_REGNUM);
  setup_class_hard_regs ();
}

#define alloc_reg_class_subclasses \
  (this_target_ira_int->x_alloc_reg_class_subclasses)

/* Initialize the table of subclasses of each reg class.  */
static void
setup_reg_subclasses (void)
{
  int i, j;
  HARD_REG_SET temp_hard_regset2;

  for (i = 0; i < N_REG_CLASSES; i++)
    for (j = 0; j < N_REG_CLASSES; j++)
      alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      if (i == (int) NO_REGS)
        continue;

      temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
      if (hard_reg_set_empty_p (temp_hard_regset))
        continue;
      for (j = 0; j < N_REG_CLASSES; j++)
        if (i != j)
          {
            enum reg_class *p;

            temp_hard_regset2 = reg_class_contents[j] & ~no_unit_alloc_regs;
            if (! hard_reg_set_subset_p (temp_hard_regset,
                                         temp_hard_regset2))
              continue;
            p = &alloc_reg_class_subclasses[j][0];
            while (*p != LIM_REG_CLASSES) p++;
            *p = (enum reg_class) i;
          }
    }
}
565 /* Set up IRA_MEMORY_MOVE_COST and IRA_MAX_MEMORY_MOVE_COST. */
567 setup_class_subset_and_memory_move_costs (void)
569 int cl
, cl2
, mode
, cost
;
570 HARD_REG_SET temp_hard_regset2
;
572 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
573 ira_memory_move_cost
[mode
][NO_REGS
][0]
574 = ira_memory_move_cost
[mode
][NO_REGS
][1] = SHRT_MAX
;
575 for (cl
= (int) N_REG_CLASSES
- 1; cl
>= 0; cl
--)
577 if (cl
!= (int) NO_REGS
)
578 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
580 ira_max_memory_move_cost
[mode
][cl
][0]
581 = ira_memory_move_cost
[mode
][cl
][0]
582 = memory_move_cost ((machine_mode
) mode
,
583 (reg_class_t
) cl
, false);
584 ira_max_memory_move_cost
[mode
][cl
][1]
585 = ira_memory_move_cost
[mode
][cl
][1]
586 = memory_move_cost ((machine_mode
) mode
,
587 (reg_class_t
) cl
, true);
588 /* Costs for NO_REGS are used in cost calculation on the
589 1st pass when the preferred register classes are not
590 known yet. In this case we take the best scenario. */
591 if (ira_memory_move_cost
[mode
][NO_REGS
][0]
592 > ira_memory_move_cost
[mode
][cl
][0])
593 ira_max_memory_move_cost
[mode
][NO_REGS
][0]
594 = ira_memory_move_cost
[mode
][NO_REGS
][0]
595 = ira_memory_move_cost
[mode
][cl
][0];
596 if (ira_memory_move_cost
[mode
][NO_REGS
][1]
597 > ira_memory_move_cost
[mode
][cl
][1])
598 ira_max_memory_move_cost
[mode
][NO_REGS
][1]
599 = ira_memory_move_cost
[mode
][NO_REGS
][1]
600 = ira_memory_move_cost
[mode
][cl
][1];
603 for (cl
= (int) N_REG_CLASSES
- 1; cl
>= 0; cl
--)
604 for (cl2
= (int) N_REG_CLASSES
- 1; cl2
>= 0; cl2
--)
606 temp_hard_regset
= reg_class_contents
[cl
] & ~no_unit_alloc_regs
;
607 temp_hard_regset2
= reg_class_contents
[cl2
] & ~no_unit_alloc_regs
;
608 ira_class_subset_p
[cl
][cl2
]
609 = hard_reg_set_subset_p (temp_hard_regset
, temp_hard_regset2
);
610 if (! hard_reg_set_empty_p (temp_hard_regset2
)
611 && hard_reg_set_subset_p (reg_class_contents
[cl2
],
612 reg_class_contents
[cl
]))
613 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
615 cost
= ira_memory_move_cost
[mode
][cl2
][0];
616 if (cost
> ira_max_memory_move_cost
[mode
][cl
][0])
617 ira_max_memory_move_cost
[mode
][cl
][0] = cost
;
618 cost
= ira_memory_move_cost
[mode
][cl2
][1];
619 if (cost
> ira_max_memory_move_cost
[mode
][cl
][1])
620 ira_max_memory_move_cost
[mode
][cl
][1] = cost
;
623 for (cl
= (int) N_REG_CLASSES
- 1; cl
>= 0; cl
--)
624 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
626 ira_memory_move_cost
[mode
][cl
][0]
627 = ira_max_memory_move_cost
[mode
][cl
][0];
628 ira_memory_move_cost
[mode
][cl
][1]
629 = ira_max_memory_move_cost
[mode
][cl
][1];
631 setup_reg_subclasses ();
636 /* Define the following macro if allocation through malloc if
638 #define IRA_NO_OBSTACK
640 #ifndef IRA_NO_OBSTACK
641 /* Obstack used for storing all dynamic data (except bitmaps) of the
643 static struct obstack ira_obstack
;
646 /* Obstack used for storing all bitmaps of the IRA. */
647 static struct bitmap_obstack ira_bitmap_obstack
;
649 /* Allocate memory of size LEN for IRA data. */
651 ira_allocate (size_t len
)
655 #ifndef IRA_NO_OBSTACK
656 res
= obstack_alloc (&ira_obstack
, len
);
663 /* Free memory ADDR allocated for IRA data. */
665 ira_free (void *addr ATTRIBUTE_UNUSED
)
667 #ifndef IRA_NO_OBSTACK
675 /* Allocate and returns bitmap for IRA. */
677 ira_allocate_bitmap (void)
679 return BITMAP_ALLOC (&ira_bitmap_obstack
);
682 /* Free bitmap B allocated for IRA. */
684 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED
)
691 /* Output information about allocation of all allocnos (except for
692 caps) into file F. */
694 ira_print_disposition (FILE *f
)
700 fprintf (f
, "Disposition:");
701 max_regno
= max_reg_num ();
702 for (n
= 0, i
= FIRST_PSEUDO_REGISTER
; i
< max_regno
; i
++)
703 for (a
= ira_regno_allocno_map
[i
];
705 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
710 fprintf (f
, " %4d:r%-4d", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
711 if ((bb
= ALLOCNO_LOOP_TREE_NODE (a
)->bb
) != NULL
)
712 fprintf (f
, "b%-3d", bb
->index
);
714 fprintf (f
, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a
)->loop_num
);
715 if (ALLOCNO_HARD_REGNO (a
) >= 0)
716 fprintf (f
, " %3d", ALLOCNO_HARD_REGNO (a
));
723 /* Outputs information about allocation of all allocnos into
726 ira_debug_disposition (void)
728 ira_print_disposition (stderr
);
733 /* Set up ira_stack_reg_pressure_class which is the biggest pressure
734 register class containing stack registers or NO_REGS if there are
735 no stack registers. To find this class, we iterate through all
736 register pressure classes and choose the first register pressure
737 class containing all the stack registers and having the biggest
740 setup_stack_reg_pressure_class (void)
742 ira_stack_reg_pressure_class
= NO_REGS
;
747 HARD_REG_SET temp_hard_regset2
;
749 CLEAR_HARD_REG_SET (temp_hard_regset
);
750 for (i
= FIRST_STACK_REG
; i
<= LAST_STACK_REG
; i
++)
751 SET_HARD_REG_BIT (temp_hard_regset
, i
);
753 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
755 cl
= ira_pressure_classes
[i
];
756 temp_hard_regset2
= temp_hard_regset
& reg_class_contents
[cl
];
757 size
= hard_reg_set_size (temp_hard_regset2
);
761 ira_stack_reg_pressure_class
= cl
;
768 /* Find pressure classes which are register classes for which we
769 calculate register pressure in IRA, register pressure sensitive
770 insn scheduling, and register pressure sensitive loop invariant
773 To make register pressure calculation easy, we always use
774 non-intersected register pressure classes. A move of hard
775 registers from one register pressure class is not more expensive
776 than load and store of the hard registers. Most likely an allocno
777 class will be a subset of a register pressure class and in many
778 cases a register pressure class. That makes usage of register
779 pressure classes a good approximation to find a high register
782 setup_pressure_classes (void)
784 int cost
, i
, n
, curr
;
786 enum reg_class pressure_classes
[N_REG_CLASSES
];
788 HARD_REG_SET temp_hard_regset2
;
791 if (targetm
.compute_pressure_classes
)
792 n
= targetm
.compute_pressure_classes (pressure_classes
);
796 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
798 if (ira_class_hard_regs_num
[cl
] == 0)
800 if (ira_class_hard_regs_num
[cl
] != 1
801 /* A register class without subclasses may contain a few
802 hard registers and movement between them is costly
803 (e.g. SPARC FPCC registers). We still should consider it
804 as a candidate for a pressure class. */
805 && alloc_reg_class_subclasses
[cl
][0] < cl
)
807 /* Check that the moves between any hard registers of the
808 current class are not more expensive for a legal mode
809 than load/store of the hard registers of the current
810 class. Such class is a potential candidate to be a
811 register pressure class. */
812 for (m
= 0; m
< NUM_MACHINE_MODES
; m
++)
815 = (reg_class_contents
[cl
]
816 & ~(no_unit_alloc_regs
817 | ira_prohibited_class_mode_regs
[cl
][m
]));
818 if (hard_reg_set_empty_p (temp_hard_regset
))
820 ira_init_register_move_cost_if_necessary ((machine_mode
) m
);
821 cost
= ira_register_move_cost
[m
][cl
][cl
];
822 if (cost
<= ira_max_memory_move_cost
[m
][cl
][1]
823 || cost
<= ira_max_memory_move_cost
[m
][cl
][0])
826 if (m
>= NUM_MACHINE_MODES
)
831 temp_hard_regset
= reg_class_contents
[cl
] & ~no_unit_alloc_regs
;
832 /* Remove so far added pressure classes which are subset of the
833 current candidate class. Prefer GENERAL_REGS as a pressure
834 register class to another class containing the same
835 allocatable hard registers. We do this because machine
836 dependent cost hooks might give wrong costs for the latter
837 class but always give the right cost for the former class
839 for (i
= 0; i
< n
; i
++)
841 cl2
= pressure_classes
[i
];
842 temp_hard_regset2
= (reg_class_contents
[cl2
]
843 & ~no_unit_alloc_regs
);
844 if (hard_reg_set_subset_p (temp_hard_regset
, temp_hard_regset2
)
845 && (temp_hard_regset
!= temp_hard_regset2
846 || cl2
== (int) GENERAL_REGS
))
848 pressure_classes
[curr
++] = (enum reg_class
) cl2
;
852 if (hard_reg_set_subset_p (temp_hard_regset2
, temp_hard_regset
)
853 && (temp_hard_regset2
!= temp_hard_regset
854 || cl
== (int) GENERAL_REGS
))
856 if (temp_hard_regset2
== temp_hard_regset
)
858 pressure_classes
[curr
++] = (enum reg_class
) cl2
;
860 /* If the current candidate is a subset of a so far added
861 pressure class, don't add it to the list of the pressure
864 pressure_classes
[curr
++] = (enum reg_class
) cl
;
868 #ifdef ENABLE_IRA_CHECKING
870 HARD_REG_SET ignore_hard_regs
;
872 /* Check pressure classes correctness: here we check that hard
873 registers from all register pressure classes contains all hard
874 registers available for the allocation. */
875 CLEAR_HARD_REG_SET (temp_hard_regset
);
876 CLEAR_HARD_REG_SET (temp_hard_regset2
);
877 ignore_hard_regs
= no_unit_alloc_regs
;
878 for (cl
= 0; cl
< LIM_REG_CLASSES
; cl
++)
880 /* For some targets (like MIPS with MD_REGS), there are some
881 classes with hard registers available for allocation but
882 not able to hold value of any mode. */
883 for (m
= 0; m
< NUM_MACHINE_MODES
; m
++)
884 if (contains_reg_of_mode
[cl
][m
])
886 if (m
>= NUM_MACHINE_MODES
)
888 ignore_hard_regs
|= reg_class_contents
[cl
];
891 for (i
= 0; i
< n
; i
++)
892 if ((int) pressure_classes
[i
] == cl
)
894 temp_hard_regset2
|= reg_class_contents
[cl
];
896 temp_hard_regset
|= reg_class_contents
[cl
];
898 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
899 /* Some targets (like SPARC with ICC reg) have allocatable regs
900 for which no reg class is defined. */
901 if (REGNO_REG_CLASS (i
) == NO_REGS
)
902 SET_HARD_REG_BIT (ignore_hard_regs
, i
);
903 temp_hard_regset
&= ~ignore_hard_regs
;
904 temp_hard_regset2
&= ~ignore_hard_regs
;
905 ira_assert (hard_reg_set_subset_p (temp_hard_regset2
, temp_hard_regset
));
908 ira_pressure_classes_num
= 0;
909 for (i
= 0; i
< n
; i
++)
911 cl
= (int) pressure_classes
[i
];
912 ira_reg_pressure_class_p
[cl
] = true;
913 ira_pressure_classes
[ira_pressure_classes_num
++] = (enum reg_class
) cl
;
915 setup_stack_reg_pressure_class ();
918 /* Set up IRA_UNIFORM_CLASS_P. Uniform class is a register class
919 whose register move cost between any registers of the class is the
920 same as for all its subclasses. We use the data to speed up the
921 2nd pass of calculations of allocno costs. */
923 setup_uniform_class_p (void)
927 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
929 ira_uniform_class_p
[cl
] = false;
930 if (ira_class_hard_regs_num
[cl
] == 0)
932 /* We cannot use alloc_reg_class_subclasses here because move
933 cost hooks does not take into account that some registers are
934 unavailable for the subtarget. E.g. for i686, INT_SSE_REGS
935 is element of alloc_reg_class_subclasses for GENERAL_REGS
936 because SSE regs are unavailable. */
937 for (i
= 0; (cl2
= reg_class_subclasses
[cl
][i
]) != LIM_REG_CLASSES
; i
++)
939 if (ira_class_hard_regs_num
[cl2
] == 0)
941 for (m
= 0; m
< NUM_MACHINE_MODES
; m
++)
942 if (contains_reg_of_mode
[cl
][m
] && contains_reg_of_mode
[cl2
][m
])
944 ira_init_register_move_cost_if_necessary ((machine_mode
) m
);
945 if (ira_register_move_cost
[m
][cl
][cl
]
946 != ira_register_move_cost
[m
][cl2
][cl2
])
949 if (m
< NUM_MACHINE_MODES
)
952 if (cl2
== LIM_REG_CLASSES
)
953 ira_uniform_class_p
[cl
] = true;
957 /* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
958 IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.
960 Target may have many subtargets and not all target hard registers can
961 be used for allocation, e.g. x86 port in 32-bit mode cannot use
962 hard registers introduced in x86-64 like r8-r15). Some classes
963 might have the same allocatable hard registers, e.g. INDEX_REGS
964 and GENERAL_REGS in x86 port in 32-bit mode. To decrease different
965 calculations efforts we introduce allocno classes which contain
966 unique non-empty sets of allocatable hard-registers.
968 Pseudo class cost calculation in ira-costs.c is very expensive.
969 Therefore we are trying to decrease number of classes involved in
970 such calculation. Register classes used in the cost calculation
971 are called important classes. They are allocno classes and other
972 non-empty classes whose allocatable hard register sets are inside
973 of an allocno class hard register set. From the first sight, it
974 looks like that they are just allocno classes. It is not true. In
975 example of x86-port in 32-bit mode, allocno classes will contain
976 GENERAL_REGS but not LEGACY_REGS (because allocatable hard
977 registers are the same for the both classes). The important
978 classes will contain GENERAL_REGS and LEGACY_REGS. It is done
979 because a machine description insn constraint may refers for
980 LEGACY_REGS and code in ira-costs.c is mostly base on investigation
981 of the insn constraints. */
983 setup_allocno_and_important_classes (void)
987 HARD_REG_SET temp_hard_regset2
;
988 static enum reg_class classes
[LIM_REG_CLASSES
+ 1];
991 /* Collect classes which contain unique sets of allocatable hard
992 registers. Prefer GENERAL_REGS to other classes containing the
993 same set of hard registers. */
994 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
996 temp_hard_regset
= reg_class_contents
[i
] & ~no_unit_alloc_regs
;
997 for (j
= 0; j
< n
; j
++)
1000 temp_hard_regset2
= reg_class_contents
[cl
] & ~no_unit_alloc_regs
;
1001 if (temp_hard_regset
== temp_hard_regset2
)
1004 if (j
>= n
|| targetm
.additional_allocno_class_p (i
))
1005 classes
[n
++] = (enum reg_class
) i
;
1006 else if (i
== GENERAL_REGS
)
1007 /* Prefer general regs. For i386 example, it means that
1008 we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
1009 (all of them consists of the same available hard
1011 classes
[j
] = (enum reg_class
) i
;
1013 classes
[n
] = LIM_REG_CLASSES
;
1015 /* Set up classes which can be used for allocnos as classes
1016 containing non-empty unique sets of allocatable hard
1018 ira_allocno_classes_num
= 0;
1019 for (i
= 0; (cl
= classes
[i
]) != LIM_REG_CLASSES
; i
++)
1020 if (ira_class_hard_regs_num
[cl
] > 0)
1021 ira_allocno_classes
[ira_allocno_classes_num
++] = (enum reg_class
) cl
;
1022 ira_important_classes_num
= 0;
1023 /* Add non-allocno classes containing to non-empty set of
1024 allocatable hard regs. */
1025 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1026 if (ira_class_hard_regs_num
[cl
] > 0)
1028 temp_hard_regset
= reg_class_contents
[cl
] & ~no_unit_alloc_regs
;
1030 for (j
= 0; j
< ira_allocno_classes_num
; j
++)
1032 temp_hard_regset2
= (reg_class_contents
[ira_allocno_classes
[j
]]
1033 & ~no_unit_alloc_regs
);
1034 if ((enum reg_class
) cl
== ira_allocno_classes
[j
])
1036 else if (hard_reg_set_subset_p (temp_hard_regset
,
1040 if (set_p
&& j
>= ira_allocno_classes_num
)
1041 ira_important_classes
[ira_important_classes_num
++]
1042 = (enum reg_class
) cl
;
1044 /* Now add allocno classes to the important classes. */
1045 for (j
= 0; j
< ira_allocno_classes_num
; j
++)
1046 ira_important_classes
[ira_important_classes_num
++]
1047 = ira_allocno_classes
[j
];
1048 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1050 ira_reg_allocno_class_p
[cl
] = false;
1051 ira_reg_pressure_class_p
[cl
] = false;
1053 for (j
= 0; j
< ira_allocno_classes_num
; j
++)
1054 ira_reg_allocno_class_p
[ira_allocno_classes
[j
]] = true;
1055 setup_pressure_classes ();
1056 setup_uniform_class_p ();
1059 /* Setup translation in CLASS_TRANSLATE of all classes into a class
1060 given by array CLASSES of length CLASSES_NUM. The function is used
1061 make translation any reg class to an allocno class or to an
1062 pressure class. This translation is necessary for some
1063 calculations when we can use only allocno or pressure classes and
1064 such translation represents an approximate representation of all
1067 The translation in case when allocatable hard register set of a
1068 given class is subset of allocatable hard register set of a class
1069 in CLASSES is pretty simple. We use smallest classes from CLASSES
1070 containing a given class. If allocatable hard register set of a
1071 given class is not a subset of any corresponding set of a class
1072 from CLASSES, we use the cheapest (with load/store point of view)
1073 class from CLASSES whose set intersects with given class set. */
1075 setup_class_translate_array (enum reg_class
*class_translate
,
1076 int classes_num
, enum reg_class
*classes
)
1079 enum reg_class aclass
, best_class
, *cl_ptr
;
1080 int i
, cost
, min_cost
, best_cost
;
1082 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1083 class_translate
[cl
] = NO_REGS
;
1085 for (i
= 0; i
< classes_num
; i
++)
1087 aclass
= classes
[i
];
1088 for (cl_ptr
= &alloc_reg_class_subclasses
[aclass
][0];
1089 (cl
= *cl_ptr
) != LIM_REG_CLASSES
;
1091 if (class_translate
[cl
] == NO_REGS
)
1092 class_translate
[cl
] = aclass
;
1093 class_translate
[aclass
] = aclass
;
1095 /* For classes which are not fully covered by one of given classes
1096 (in other words covered by more one given class), use the
1098 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1100 if (cl
== NO_REGS
|| class_translate
[cl
] != NO_REGS
)
1102 best_class
= NO_REGS
;
1103 best_cost
= INT_MAX
;
1104 for (i
= 0; i
< classes_num
; i
++)
1106 aclass
= classes
[i
];
1107 temp_hard_regset
= (reg_class_contents
[aclass
]
1108 & reg_class_contents
[cl
]
1109 & ~no_unit_alloc_regs
);
1110 if (! hard_reg_set_empty_p (temp_hard_regset
))
1113 for (mode
= 0; mode
< MAX_MACHINE_MODE
; mode
++)
1115 cost
= (ira_memory_move_cost
[mode
][aclass
][0]
1116 + ira_memory_move_cost
[mode
][aclass
][1]);
1117 if (min_cost
> cost
)
1120 if (best_class
== NO_REGS
|| best_cost
> min_cost
)
1122 best_class
= aclass
;
1123 best_cost
= min_cost
;
1127 class_translate
[cl
] = best_class
;
1131 /* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and
1132 IRA_PRESSURE_CLASS_TRANSLATE. */
1134 setup_class_translate (void)
1136 setup_class_translate_array (ira_allocno_class_translate
,
1137 ira_allocno_classes_num
, ira_allocno_classes
);
1138 setup_class_translate_array (ira_pressure_class_translate
,
1139 ira_pressure_classes_num
, ira_pressure_classes
);
1142 /* Order numbers of allocno classes in original target allocno class
1143 array, -1 for non-allocno classes. */
1144 static int allocno_class_order
[N_REG_CLASSES
];
1146 /* The function used to sort the important classes. */
1148 comp_reg_classes_func (const void *v1p
, const void *v2p
)
1150 enum reg_class cl1
= *(const enum reg_class
*) v1p
;
1151 enum reg_class cl2
= *(const enum reg_class
*) v2p
;
1152 enum reg_class tcl1
, tcl2
;
1155 tcl1
= ira_allocno_class_translate
[cl1
];
1156 tcl2
= ira_allocno_class_translate
[cl2
];
1157 if (tcl1
!= NO_REGS
&& tcl2
!= NO_REGS
1158 && (diff
= allocno_class_order
[tcl1
] - allocno_class_order
[tcl2
]) != 0)
1160 return (int) cl1
- (int) cl2
;
1163 /* For correct work of function setup_reg_class_relation we need to
1164 reorder important classes according to the order of their allocno
1165 classes. It places important classes containing the same
1166 allocatable hard register set adjacent to each other and allocno
1167 class with the allocatable hard register set right after the other
1168 important classes with the same set.
1170 In example from comments of function
1171 setup_allocno_and_important_classes, it places LEGACY_REGS and
1172 GENERAL_REGS close to each other and GENERAL_REGS is after
1175 reorder_important_classes (void)
1179 for (i
= 0; i
< N_REG_CLASSES
; i
++)
1180 allocno_class_order
[i
] = -1;
1181 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
1182 allocno_class_order
[ira_allocno_classes
[i
]] = i
;
1183 qsort (ira_important_classes
, ira_important_classes_num
,
1184 sizeof (enum reg_class
), comp_reg_classes_func
);
1185 for (i
= 0; i
< ira_important_classes_num
; i
++)
1186 ira_important_class_nums
[ira_important_classes
[i
]] = i
;
1189 /* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
1190 IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
1191 IRA_REG_CLASSES_INTERSECT_P. For the meaning of the relations,
1192 please see corresponding comments in ira-int.h. */
1194 setup_reg_class_relations (void)
1196 int i
, cl1
, cl2
, cl3
;
1197 HARD_REG_SET intersection_set
, union_set
, temp_set2
;
1198 bool important_class_p
[N_REG_CLASSES
];
1200 memset (important_class_p
, 0, sizeof (important_class_p
));
1201 for (i
= 0; i
< ira_important_classes_num
; i
++)
1202 important_class_p
[ira_important_classes
[i
]] = true;
1203 for (cl1
= 0; cl1
< N_REG_CLASSES
; cl1
++)
1205 ira_reg_class_super_classes
[cl1
][0] = LIM_REG_CLASSES
;
1206 for (cl2
= 0; cl2
< N_REG_CLASSES
; cl2
++)
1208 ira_reg_classes_intersect_p
[cl1
][cl2
] = false;
1209 ira_reg_class_intersect
[cl1
][cl2
] = NO_REGS
;
1210 ira_reg_class_subset
[cl1
][cl2
] = NO_REGS
;
1211 temp_hard_regset
= reg_class_contents
[cl1
] & ~no_unit_alloc_regs
;
1212 temp_set2
= reg_class_contents
[cl2
] & ~no_unit_alloc_regs
;
1213 if (hard_reg_set_empty_p (temp_hard_regset
)
1214 && hard_reg_set_empty_p (temp_set2
))
1216 /* The both classes have no allocatable hard registers
1217 -- take all class hard registers into account and use
1218 reg_class_subunion and reg_class_superunion. */
1221 cl3
= reg_class_subclasses
[cl1
][i
];
1222 if (cl3
== LIM_REG_CLASSES
)
1224 if (reg_class_subset_p (ira_reg_class_intersect
[cl1
][cl2
],
1225 (enum reg_class
) cl3
))
1226 ira_reg_class_intersect
[cl1
][cl2
] = (enum reg_class
) cl3
;
1228 ira_reg_class_subunion
[cl1
][cl2
] = reg_class_subunion
[cl1
][cl2
];
1229 ira_reg_class_superunion
[cl1
][cl2
] = reg_class_superunion
[cl1
][cl2
];
1232 ira_reg_classes_intersect_p
[cl1
][cl2
]
1233 = hard_reg_set_intersect_p (temp_hard_regset
, temp_set2
);
1234 if (important_class_p
[cl1
] && important_class_p
[cl2
]
1235 && hard_reg_set_subset_p (temp_hard_regset
, temp_set2
))
1237 /* CL1 and CL2 are important classes and CL1 allocatable
1238 hard register set is inside of CL2 allocatable hard
1239 registers -- make CL1 a superset of CL2. */
1242 p
= &ira_reg_class_super_classes
[cl1
][0];
1243 while (*p
!= LIM_REG_CLASSES
)
1245 *p
++ = (enum reg_class
) cl2
;
1246 *p
= LIM_REG_CLASSES
;
1248 ira_reg_class_subunion
[cl1
][cl2
] = NO_REGS
;
1249 ira_reg_class_superunion
[cl1
][cl2
] = NO_REGS
;
1250 intersection_set
= (reg_class_contents
[cl1
]
1251 & reg_class_contents
[cl2
]
1252 & ~no_unit_alloc_regs
);
1253 union_set
= ((reg_class_contents
[cl1
] | reg_class_contents
[cl2
])
1254 & ~no_unit_alloc_regs
);
1255 for (cl3
= 0; cl3
< N_REG_CLASSES
; cl3
++)
1257 temp_hard_regset
= reg_class_contents
[cl3
] & ~no_unit_alloc_regs
;
1258 if (hard_reg_set_subset_p (temp_hard_regset
, intersection_set
))
1260 /* CL3 allocatable hard register set is inside of
1261 intersection of allocatable hard register sets
1263 if (important_class_p
[cl3
])
1266 = (reg_class_contents
1267 [ira_reg_class_intersect
[cl1
][cl2
]]);
1268 temp_set2
&= ~no_unit_alloc_regs
;
1269 if (! hard_reg_set_subset_p (temp_hard_regset
, temp_set2
)
1270 /* If the allocatable hard register sets are
1271 the same, prefer GENERAL_REGS or the
1272 smallest class for debugging
1274 || (temp_hard_regset
== temp_set2
1275 && (cl3
== GENERAL_REGS
1276 || ((ira_reg_class_intersect
[cl1
][cl2
]
1278 && hard_reg_set_subset_p
1279 (reg_class_contents
[cl3
],
1282 ira_reg_class_intersect
[cl1
][cl2
]])))))
1283 ira_reg_class_intersect
[cl1
][cl2
] = (enum reg_class
) cl3
;
1286 = (reg_class_contents
[ira_reg_class_subset
[cl1
][cl2
]]
1287 & ~no_unit_alloc_regs
);
1288 if (! hard_reg_set_subset_p (temp_hard_regset
, temp_set2
)
1289 /* Ignore unavailable hard registers and prefer
1290 smallest class for debugging purposes. */
1291 || (temp_hard_regset
== temp_set2
1292 && hard_reg_set_subset_p
1293 (reg_class_contents
[cl3
],
1295 [(int) ira_reg_class_subset
[cl1
][cl2
]])))
1296 ira_reg_class_subset
[cl1
][cl2
] = (enum reg_class
) cl3
;
1298 if (important_class_p
[cl3
]
1299 && hard_reg_set_subset_p (temp_hard_regset
, union_set
))
1301 /* CL3 allocatable hard register set is inside of
1302 union of allocatable hard register sets of CL1
1305 = (reg_class_contents
[ira_reg_class_subunion
[cl1
][cl2
]]
1306 & ~no_unit_alloc_regs
);
1307 if (ira_reg_class_subunion
[cl1
][cl2
] == NO_REGS
1308 || (hard_reg_set_subset_p (temp_set2
, temp_hard_regset
)
1310 && (temp_set2
!= temp_hard_regset
1311 || cl3
== GENERAL_REGS
1312 /* If the allocatable hard register sets are the
1313 same, prefer GENERAL_REGS or the smallest
1314 class for debugging purposes. */
1315 || (ira_reg_class_subunion
[cl1
][cl2
] != GENERAL_REGS
1316 && hard_reg_set_subset_p
1317 (reg_class_contents
[cl3
],
1319 [(int) ira_reg_class_subunion
[cl1
][cl2
]])))))
1320 ira_reg_class_subunion
[cl1
][cl2
] = (enum reg_class
) cl3
;
1322 if (hard_reg_set_subset_p (union_set
, temp_hard_regset
))
1324 /* CL3 allocatable hard register set contains union
1325 of allocatable hard register sets of CL1 and
1328 = (reg_class_contents
[ira_reg_class_superunion
[cl1
][cl2
]]
1329 & ~no_unit_alloc_regs
);
1330 if (ira_reg_class_superunion
[cl1
][cl2
] == NO_REGS
1331 || (hard_reg_set_subset_p (temp_hard_regset
, temp_set2
)
1333 && (temp_set2
!= temp_hard_regset
1334 || cl3
== GENERAL_REGS
1335 /* If the allocatable hard register sets are the
1336 same, prefer GENERAL_REGS or the smallest
1337 class for debugging purposes. */
1338 || (ira_reg_class_superunion
[cl1
][cl2
] != GENERAL_REGS
1339 && hard_reg_set_subset_p
1340 (reg_class_contents
[cl3
],
1342 [(int) ira_reg_class_superunion
[cl1
][cl2
]])))))
1343 ira_reg_class_superunion
[cl1
][cl2
] = (enum reg_class
) cl3
;
1350 /* Output all uniform and important classes into file F. */
1352 print_uniform_and_important_classes (FILE *f
)
1356 fprintf (f
, "Uniform classes:\n");
1357 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1358 if (ira_uniform_class_p
[cl
])
1359 fprintf (f
, " %s", reg_class_names
[cl
]);
1360 fprintf (f
, "\nImportant classes:\n");
1361 for (i
= 0; i
< ira_important_classes_num
; i
++)
1362 fprintf (f
, " %s", reg_class_names
[ira_important_classes
[i
]]);
1366 /* Output all possible allocno or pressure classes and their
1367 translation map into file F. */
1369 print_translated_classes (FILE *f
, bool pressure_p
)
1371 int classes_num
= (pressure_p
1372 ? ira_pressure_classes_num
: ira_allocno_classes_num
);
1373 enum reg_class
*classes
= (pressure_p
1374 ? ira_pressure_classes
: ira_allocno_classes
);
1375 enum reg_class
*class_translate
= (pressure_p
1376 ? ira_pressure_class_translate
1377 : ira_allocno_class_translate
);
1380 fprintf (f
, "%s classes:\n", pressure_p
? "Pressure" : "Allocno");
1381 for (i
= 0; i
< classes_num
; i
++)
1382 fprintf (f
, " %s", reg_class_names
[classes
[i
]]);
1383 fprintf (f
, "\nClass translation:\n");
1384 for (i
= 0; i
< N_REG_CLASSES
; i
++)
1385 fprintf (f
, " %s -> %s\n", reg_class_names
[i
],
1386 reg_class_names
[class_translate
[i
]]);
1389 /* Output all possible allocno and translation classes and the
1390 translation maps into stderr. */
1392 ira_debug_allocno_classes (void)
1394 print_uniform_and_important_classes (stderr
);
1395 print_translated_classes (stderr
, false);
1396 print_translated_classes (stderr
, true);
1399 /* Set up different arrays concerning class subsets, allocno and
1400 important classes. */
1402 find_reg_classes (void)
1404 setup_allocno_and_important_classes ();
1405 setup_class_translate ();
1406 reorder_important_classes ();
1407 setup_reg_class_relations ();
1412 /* Set up the array above. */
1414 setup_hard_regno_aclass (void)
1418 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
1421 ira_hard_regno_allocno_class
[i
]
1422 = (TEST_HARD_REG_BIT (no_unit_alloc_regs
, i
)
1424 : ira_allocno_class_translate
[REGNO_REG_CLASS (i
)]);
1428 ira_hard_regno_allocno_class
[i
] = NO_REGS
;
1429 for (j
= 0; j
< ira_allocno_classes_num
; j
++)
1431 cl
= ira_allocno_classes
[j
];
1432 if (ira_class_hard_reg_index
[cl
][i
] >= 0)
1434 ira_hard_regno_allocno_class
[i
] = cl
;
1444 /* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps. */
1446 setup_reg_class_nregs (void)
1450 for (m
= 0; m
< MAX_MACHINE_MODE
; m
++)
1452 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1453 ira_reg_class_max_nregs
[cl
][m
]
1454 = ira_reg_class_min_nregs
[cl
][m
]
1455 = targetm
.class_max_nregs ((reg_class_t
) cl
, (machine_mode
) m
);
1456 for (cl
= 0; cl
< N_REG_CLASSES
; cl
++)
1458 (cl2
= alloc_reg_class_subclasses
[cl
][i
]) != LIM_REG_CLASSES
;
1460 if (ira_reg_class_min_nregs
[cl2
][m
]
1461 < ira_reg_class_min_nregs
[cl
][m
])
1462 ira_reg_class_min_nregs
[cl
][m
] = ira_reg_class_min_nregs
[cl2
][m
];
1468 /* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON.
1469 This function is called once IRA_CLASS_HARD_REGS has been initialized. */
1471 setup_prohibited_class_mode_regs (void)
1473 int j
, k
, hard_regno
, cl
, last_hard_regno
, count
;
1475 for (cl
= (int) N_REG_CLASSES
- 1; cl
>= 0; cl
--)
1477 temp_hard_regset
= reg_class_contents
[cl
] & ~no_unit_alloc_regs
;
1478 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
1481 last_hard_regno
= -1;
1482 CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs
[cl
][j
]);
1483 for (k
= ira_class_hard_regs_num
[cl
] - 1; k
>= 0; k
--)
1485 hard_regno
= ira_class_hard_regs
[cl
][k
];
1486 if (!targetm
.hard_regno_mode_ok (hard_regno
, (machine_mode
) j
))
1487 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs
[cl
][j
],
1489 else if (in_hard_reg_set_p (temp_hard_regset
,
1490 (machine_mode
) j
, hard_regno
))
1492 last_hard_regno
= hard_regno
;
1496 ira_class_singleton
[cl
][j
] = (count
== 1 ? last_hard_regno
: -1);
1501 /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
1502 spanning from one register pressure class to another one. It is
1503 called after defining the pressure classes. */
1505 clarify_prohibited_class_mode_regs (void)
1507 int j
, k
, hard_regno
, cl
, pclass
, nregs
;
1509 for (cl
= (int) N_REG_CLASSES
- 1; cl
>= 0; cl
--)
1510 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
1512 CLEAR_HARD_REG_SET (ira_useful_class_mode_regs
[cl
][j
]);
1513 for (k
= ira_class_hard_regs_num
[cl
] - 1; k
>= 0; k
--)
1515 hard_regno
= ira_class_hard_regs
[cl
][k
];
1516 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs
[cl
][j
], hard_regno
))
1518 nregs
= hard_regno_nregs (hard_regno
, (machine_mode
) j
);
1519 if (hard_regno
+ nregs
> FIRST_PSEUDO_REGISTER
)
1521 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs
[cl
][j
],
1525 pclass
= ira_pressure_class_translate
[REGNO_REG_CLASS (hard_regno
)];
1526 for (nregs
-- ;nregs
>= 0; nregs
--)
1527 if (((enum reg_class
) pclass
1528 != ira_pressure_class_translate
[REGNO_REG_CLASS
1529 (hard_regno
+ nregs
)]))
1531 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs
[cl
][j
],
1535 if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs
[cl
][j
],
1537 add_to_hard_reg_set (&ira_useful_class_mode_regs
[cl
][j
],
1538 (machine_mode
) j
, hard_regno
);
/* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST
   and IRA_MAY_MOVE_OUT_COST for MODE.  */
void
ira_init_register_move_cost (machine_mode mode)
{
  static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES];
  bool all_match = true;
  unsigned int i, cl1, cl2;
  HARD_REG_SET ok_regs;

  ira_assert (ira_register_move_cost[mode] == NULL
	      && ira_may_move_in_cost[mode] == NULL
	      && ira_may_move_out_cost[mode] == NULL);
  CLEAR_HARD_REG_SET (ok_regs);
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (targetm.hard_regno_mode_ok (i, mode))
      SET_HARD_REG_BIT (ok_regs, i);

  /* Note that we might be asked about the move costs of modes that
     cannot be stored in any hard register, for example if an inline
     asm tries to create a register operand with an impossible mode.
     We therefore can't assert have_regs_of_mode[mode] here.  */
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
      {
	int cost;

	if (!hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl1])
	    || !hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl2]))
	  {
	    if ((ira_reg_class_max_nregs[cl1][mode]
		 > ira_class_hard_regs_num[cl1])
		|| (ira_reg_class_max_nregs[cl2][mode]
		    > ira_class_hard_regs_num[cl2]))
	      cost = 65535;
	    else
	      cost = (ira_memory_move_cost[mode][cl1][0]
		      + ira_memory_move_cost[mode][cl2][1]) * 2;
	  }
	else
	  {
	    cost = register_move_cost (mode, (enum reg_class) cl1,
				       (enum reg_class) cl2);
	    ira_assert (cost < 65535);
	  }
	all_match &= (last_move_cost[cl1][cl2] == cost);
	last_move_cost[cl1][cl2] = cost;
      }
  if (all_match && last_mode_for_init_move_cost != -1)
    {
      ira_register_move_cost[mode]
	= ira_register_move_cost[last_mode_for_init_move_cost];
      ira_may_move_in_cost[mode]
	= ira_may_move_in_cost[last_mode_for_init_move_cost];
      ira_may_move_out_cost[mode]
	= ira_may_move_out_cost[last_mode_for_init_move_cost];
      return;
    }
  last_mode_for_init_move_cost = mode;
  ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
      {
	int cost;
	enum reg_class *p1, *p2;

	if (last_move_cost[cl1][cl2] == 65535)
	  {
	    ira_register_move_cost[mode][cl1][cl2] = 65535;
	    ira_may_move_in_cost[mode][cl1][cl2] = 65535;
	    ira_may_move_out_cost[mode][cl1][cl2] = 65535;
	  }
	else
	  {
	    cost = last_move_cost[cl1][cl2];

	    for (p2 = &reg_class_subclasses[cl2][0];
		 *p2 != LIM_REG_CLASSES; p2++)
	      if (ira_class_hard_regs_num[*p2] > 0
		  && (ira_reg_class_max_nregs[*p2][mode]
		      <= ira_class_hard_regs_num[*p2]))
		cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]);

	    for (p1 = &reg_class_subclasses[cl1][0];
		 *p1 != LIM_REG_CLASSES; p1++)
	      if (ira_class_hard_regs_num[*p1] > 0
		  && (ira_reg_class_max_nregs[*p1][mode]
		      <= ira_class_hard_regs_num[*p1]))
		cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]);

	    ira_assert (cost <= 65535);
	    ira_register_move_cost[mode][cl1][cl2] = cost;

	    if (ira_class_subset_p[cl1][cl2])
	      ira_may_move_in_cost[mode][cl1][cl2] = 0;
	    else
	      ira_may_move_in_cost[mode][cl1][cl2] = cost;

	    if (ira_class_subset_p[cl2][cl1])
	      ira_may_move_out_cost[mode][cl1][cl2] = 0;
	    else
	      ira_may_move_out_cost[mode][cl1][cl2] = cost;
	  }
      }
}
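
/* Illustrative sketch (not part of the original file): callers are expected
   to make sure the per-mode tables above exist before indexing them,
   roughly

       if (ira_register_move_cost[mode] == NULL)
	 ira_init_register_move_cost (mode);
       int cost = ira_register_move_cost[mode][cl1][cl2];

   which is what the ira_init_register_move_cost_if_necessary wrapper in
   ira-int.h does.  */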
/* This is called once during compiler work.  It sets up
   different arrays whose values don't depend on the compiled
   function.  */
void
ira_init_once (void)
{
  ira_init_costs_once ();
  lra_init_once ();

  ira_use_lra_p = targetm.lra_p ();
}
/* Free ira_max_register_move_cost, ira_may_move_in_cost and
   ira_may_move_out_cost for each mode.  */
void
target_ira_int::free_register_move_costs (void)
{
  int mode, i;

  /* Reset move_cost and friends, making sure we only free shared
     table entries once.  */
  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    if (x_ira_register_move_cost[mode])
      {
	for (i = 0;
	     i < mode && (x_ira_register_move_cost[i]
			  != x_ira_register_move_cost[mode]);
	     i++)
	  ;
	if (i == mode)
	  {
	    free (x_ira_register_move_cost[mode]);
	    free (x_ira_may_move_in_cost[mode]);
	    free (x_ira_may_move_out_cost[mode]);
	  }
      }
  memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost);
  memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost);
  memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost);
  last_mode_for_init_move_cost = -1;
}
target_ira_int::~target_ira_int ()
{
  free_register_move_costs ();
}

/* This is called every time when register related information is
   changed.  */
void
ira_init (void)
{
  this_target_ira_int->free_register_move_costs ();
  setup_reg_mode_hard_regset ();
  setup_alloc_regs (flag_omit_frame_pointer != 0);
  setup_class_subset_and_memory_move_costs ();
  setup_reg_class_nregs ();
  setup_prohibited_class_mode_regs ();
  find_reg_classes ();
  clarify_prohibited_class_mode_regs ();
  setup_hard_regno_aclass ();
  ira_init_costs ();
}
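
/* Illustrative note (not from the original file): ira_init is meant to be
   re-run whenever register-related target information may have changed,
   for example after target options are (re)applied.  A caller simply does

       ira_init ();

   and all the derived class/cost tables above are rebuilt.  */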
#define ira_prohibited_mode_move_regs_initialized_p \
  (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)

/* Set up IRA_PROHIBITED_MODE_MOVE_REGS.  */
static void
setup_prohibited_mode_move_regs (void)
{
  int i, j;
  rtx test_reg1, test_reg2, move_pat;
  rtx_insn *move_insn;

  if (ira_prohibited_mode_move_regs_initialized_p)
    return;
  ira_prohibited_mode_move_regs_initialized_p = true;
  test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
  test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
  move_pat = gen_rtx_SET (test_reg1, test_reg2);
  move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
	{
	  if (!targetm.hard_regno_mode_ok (j, (machine_mode) i))
	    continue;
	  set_mode_and_regno (test_reg1, (machine_mode) i, j);
	  set_mode_and_regno (test_reg2, (machine_mode) i, j);
	  INSN_CODE (move_insn) = -1;
	  recog_memoized (move_insn);
	  if (INSN_CODE (move_insn) < 0)
	    continue;
	  extract_insn (move_insn);
	  /* We don't know whether the move will be in code that is optimized
	     for size or speed, so consider all enabled alternatives.  */
	  if (! constrain_operands (1, get_enabled_alternatives (move_insn)))
	    continue;
	  CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
	}
    }
}
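
/* Illustrative use (sketch only, not from the original file): once the
   table is set up, asking whether a plain register-register move of hard
   register REGNO in MODE was rejected by recog reduces to

       if (TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs[(int) mode],
			      regno))
	 ... no move pattern is available for REGNO in MODE ...
*/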
1761 /* Extract INSN and return the set of alternatives that we should consider.
1762 This excludes any alternatives whose constraints are obviously impossible
1763 to meet (e.g. because the constraint requires a constant and the operand
1764 is nonconstant). It also excludes alternatives that are bound to need
1765 a spill or reload, as long as we have other alternatives that match
1768 ira_setup_alts (rtx_insn
*insn
)
1773 int commutative
= -1;
1775 extract_insn (insn
);
1776 preprocess_constraints (insn
);
1777 alternative_mask preferred
= get_preferred_alternatives (insn
);
1778 alternative_mask alts
= 0;
1779 alternative_mask exact_alts
= 0;
1780 /* Check that the hard reg set is enough for holding all
1781 alternatives. It is hard to imagine the situation when the
1782 assertion is wrong. */
1783 ira_assert (recog_data
.n_alternatives
1784 <= (int) MAX (sizeof (HARD_REG_ELT_TYPE
) * CHAR_BIT
,
1785 FIRST_PSEUDO_REGISTER
));
1786 for (nop
= 0; nop
< recog_data
.n_operands
; nop
++)
1787 if (recog_data
.constraints
[nop
][0] == '%')
1792 for (curr_swapped
= false;; curr_swapped
= true)
1794 for (nalt
= 0; nalt
< recog_data
.n_alternatives
; nalt
++)
1796 if (!TEST_BIT (preferred
, nalt
) || TEST_BIT (exact_alts
, nalt
))
1799 const operand_alternative
*op_alt
1800 = &recog_op_alt
[nalt
* recog_data
.n_operands
];
1801 int this_reject
= 0;
1802 for (nop
= 0; nop
< recog_data
.n_operands
; nop
++)
1806 this_reject
+= op_alt
[nop
].reject
;
1808 rtx op
= recog_data
.operand
[nop
];
1809 p
= op_alt
[nop
].constraint
;
1810 if (*p
== 0 || *p
== ',')
1815 switch (c
= *p
, len
= CONSTRAINT_LEN (c
, p
), c
)
1826 /* The commutative modifier is handled above. */
1829 case '0': case '1': case '2': case '3': case '4':
1830 case '5': case '6': case '7': case '8': case '9':
1833 unsigned long dup
= strtoul (p
, &end
, 10);
1834 rtx other
= recog_data
.operand
[dup
];
1837 ? rtx_equal_p (other
, op
)
1838 : REG_P (op
) || SUBREG_P (op
))
1850 enum constraint_num cn
= lookup_constraint (p
);
1852 switch (get_constraint_type (cn
))
1855 if (reg_class_for_constraint (cn
) != NO_REGS
)
1857 if (REG_P (op
) || SUBREG_P (op
))
1864 if (CONST_INT_P (op
)
1865 && (insn_const_int_ok_for_constraint
1874 case CT_RELAXED_MEMORY
:
1877 case CT_SPECIAL_MEMORY
:
1879 mem
= extract_mem_from_operand (op
);
1886 if (constraint_satisfied_p (op
, cn
))
1893 while (p
+= len
, c
);
1896 /* We can make the alternative match by spilling a register
1897 to memory or loading something into a register. Count a
1898 cost of one reload (the equivalent of the '?' constraint). */
1904 if (nop
>= recog_data
.n_operands
)
1906 alts
|= ALTERNATIVE_BIT (nalt
);
1907 if (this_reject
== 0)
1908 exact_alts
|= ALTERNATIVE_BIT (nalt
);
1911 if (commutative
< 0)
1913 /* Swap forth and back to avoid changing recog_data. */
1914 std::swap (recog_data
.operand
[commutative
],
1915 recog_data
.operand
[commutative
+ 1]);
1919 return exact_alts
? exact_alts
: alts
;
1922 /* Return the number of the output non-early clobber operand which
1923 should be the same in any case as operand with number OP_NUM (or
1924 negative value if there is no such operand). ALTS is the mask
1925 of alternatives that we should consider. SINGLE_INPUT_OP_HAS_CSTR_P
1926 should be set in this function, it indicates whether there is only
1927 a single input operand which has the matching constraint on the
1928 output operand at the position specified in return value. If the
1929 pattern allows any one of several input operands holds the matching
1930 constraint, it's set as false, one typical case is destructive FMA
1931 instruction on target rs6000. Note that for a non-NO_REG preferred
1932 register class with no free register move copy, if the parameter
1933 PARAM_IRA_CONSIDER_DUP_IN_ALL_ALTS is set to one, this function
1934 will check all available alternatives for matching constraints,
1935 even if it has found or will find one alternative with non-NO_REG
1936 regclass, it can respect more cases with matching constraints. If
1937 PARAM_IRA_CONSIDER_DUP_IN_ALL_ALTS is set to zero,
1938 SINGLE_INPUT_OP_HAS_CSTR_P is always true, it will stop to find
1939 matching constraint relationship once it hits some alternative with
1940 some non-NO_REG regclass. */
1942 ira_get_dup_out_num (int op_num
, alternative_mask alts
,
1943 bool &single_input_op_has_cstr_p
)
1945 int curr_alt
, c
, original
;
1946 bool ignore_p
, use_commut_op_p
;
1949 if (op_num
< 0 || recog_data
.n_alternatives
== 0)
1951 /* We should find duplications only for input operands. */
1952 if (recog_data
.operand_type
[op_num
] != OP_IN
)
1954 str
= recog_data
.constraints
[op_num
];
1955 use_commut_op_p
= false;
1956 single_input_op_has_cstr_p
= true;
1958 rtx op
= recog_data
.operand
[op_num
];
1959 int op_regno
= reg_or_subregno (op
);
1960 enum reg_class op_pref_cl
= reg_preferred_class (op_regno
);
1961 machine_mode op_mode
= GET_MODE (op
);
1963 ira_init_register_move_cost_if_necessary (op_mode
);
1964 /* If the preferred regclass isn't NO_REG, continue to find the matching
1965 constraint in all available alternatives with preferred regclass, even
1966 if we have found or will find one alternative whose constraint stands
1967 for a REG (non-NO_REG) regclass. Note that it would be fine not to
1968 respect matching constraint if the register copy is free, so exclude
1970 bool respect_dup_despite_reg_cstr
1971 = param_ira_consider_dup_in_all_alts
1972 && op_pref_cl
!= NO_REGS
1973 && ira_register_move_cost
[op_mode
][op_pref_cl
][op_pref_cl
] > 0;
1975 /* Record the alternative whose constraint uses the same regclass as the
1976 preferred regclass, later if we find one matching constraint for this
1977 operand with preferred reclass, we will visit these recorded
1978 alternatives to check whether if there is one alternative in which no
1979 any INPUT operands have one matching constraint same as our candidate.
1980 If yes, it means there is one alternative which is perfectly fine
1981 without satisfying this matching constraint. If no, it means in any
1982 alternatives there is one other INPUT operand holding this matching
1983 constraint, it's fine to respect this matching constraint and further
1984 create this constraint copy since it would become harmless once some
1985 other takes preference and it's interfered. */
1986 alternative_mask pref_cl_alts
;
1992 for (curr_alt
= 0, ignore_p
= !TEST_BIT (alts
, curr_alt
),
2003 ignore_p
= !TEST_BIT (alts
, curr_alt
);
2005 else if (! ignore_p
)
2012 enum constraint_num cn
= lookup_constraint (str
);
2013 enum reg_class cl
= reg_class_for_constraint (cn
);
2014 if (cl
!= NO_REGS
&& !targetm
.class_likely_spilled_p (cl
))
2016 if (respect_dup_despite_reg_cstr
)
2018 /* If it's free to move from one preferred class to
2019 the one without matching constraint, it doesn't
2020 have to respect this constraint with costs. */
2021 if (cl
!= op_pref_cl
2022 && (ira_reg_class_intersect
[cl
][op_pref_cl
]
2024 && (ira_may_move_in_cost
[op_mode
][op_pref_cl
][cl
]
2027 else if (cl
== op_pref_cl
)
2028 pref_cl_alts
|= ALTERNATIVE_BIT (curr_alt
);
2033 if (constraint_satisfied_p (op
, cn
))
2038 case '0': case '1': case '2': case '3': case '4':
2039 case '5': case '6': case '7': case '8': case '9':
2042 int n
= (int) strtoul (str
, &end
, 10);
2044 if (original
!= -1 && original
!= n
)
2046 gcc_assert (n
< recog_data
.n_operands
);
2047 if (respect_dup_despite_reg_cstr
)
2049 const operand_alternative
*op_alt
2050 = &recog_op_alt
[curr_alt
* recog_data
.n_operands
];
2051 /* Only respect the one with preferred rclass, without
2052 respect_dup_despite_reg_cstr it's possible to get
2053 one whose regclass isn't preferred first before,
2054 but it would fail since there should be other
2055 alternatives with preferred regclass. */
2056 if (op_alt
[n
].cl
== op_pref_cl
)
2064 str
+= CONSTRAINT_LEN (c
, str
);
2068 if (recog_data
.operand_type
[original
] == OP_OUT
)
2070 if (pref_cl_alts
== 0)
2072 /* Visit these recorded alternatives to check whether
2073 there is one alternative in which no any INPUT operands
2074 have one matching constraint same as our candidate.
2075 Give up this candidate if so. */
2077 for (nalt
= 0; nalt
< recog_data
.n_alternatives
; nalt
++)
2079 if (!TEST_BIT (pref_cl_alts
, nalt
))
2081 const operand_alternative
*op_alt
2082 = &recog_op_alt
[nalt
* recog_data
.n_operands
];
2083 bool dup_in_other
= false;
2084 for (nop
= 0; nop
< recog_data
.n_operands
; nop
++)
2086 if (recog_data
.operand_type
[nop
] != OP_IN
)
2090 if (op_alt
[nop
].matches
== original
)
2092 dup_in_other
= true;
2099 single_input_op_has_cstr_p
= false;
2103 if (use_commut_op_p
)
2105 use_commut_op_p
= true;
2106 if (recog_data
.constraints
[op_num
][0] == '%')
2107 str
= recog_data
.constraints
[op_num
+ 1];
2108 else if (op_num
> 0 && recog_data
.constraints
[op_num
- 1][0] == '%')
2109 str
= recog_data
.constraints
[op_num
- 1];
2118 /* Search forward to see if the source register of a copy insn dies
2119 before either it or the destination register is modified, but don't
2120 scan past the end of the basic block. If so, we can replace the
2121 source with the destination and let the source die in the copy
2124 This will reduce the number of registers live in that range and may
2125 enable the destination and the source coalescing, thus often saving
2126 one register in addition to a register-register copy. */
2129 decrease_live_ranges_number (void)
2133 rtx set
, src
, dest
, dest_death
, note
;
2137 if (! flag_expensive_optimizations
)
2141 fprintf (ira_dump_file
, "Starting decreasing number of live ranges...\n");
2143 FOR_EACH_BB_FN (bb
, cfun
)
2144 FOR_BB_INSNS (bb
, insn
)
2146 set
= single_set (insn
);
2149 src
= SET_SRC (set
);
2150 dest
= SET_DEST (set
);
2151 if (! REG_P (src
) || ! REG_P (dest
)
2152 || find_reg_note (insn
, REG_DEAD
, src
))
2154 sregno
= REGNO (src
);
2155 dregno
= REGNO (dest
);
2157 /* We don't want to mess with hard regs if register classes
2159 if (sregno
== dregno
2160 || (targetm
.small_register_classes_for_mode_p (GET_MODE (src
))
2161 && (sregno
< FIRST_PSEUDO_REGISTER
2162 || dregno
< FIRST_PSEUDO_REGISTER
))
2163 /* We don't see all updates to SP if they are in an
2164 auto-inc memory reference, so we must disallow this
2165 optimization on them. */
2166 || sregno
== STACK_POINTER_REGNUM
2167 || dregno
== STACK_POINTER_REGNUM
)
2170 dest_death
= NULL_RTX
;
2172 for (p
= NEXT_INSN (insn
); p
; p
= NEXT_INSN (p
))
2176 if (BLOCK_FOR_INSN (p
) != bb
)
2179 if (reg_set_p (src
, p
) || reg_set_p (dest
, p
)
2180 /* If SRC is an asm-declared register, it must not be
2181 replaced in any asm. Unfortunately, the REG_EXPR
2182 tree for the asm variable may be absent in the SRC
2183 rtx, so we can't check the actual register
2184 declaration easily (the asm operand will have it,
2185 though). To avoid complicating the test for a rare
2186 case, we just don't perform register replacement
2187 for a hard reg mentioned in an asm. */
2188 || (sregno
< FIRST_PSEUDO_REGISTER
2189 && asm_noperands (PATTERN (p
)) >= 0
2190 && reg_overlap_mentioned_p (src
, PATTERN (p
)))
2191 /* Don't change hard registers used by a call. */
2192 || (CALL_P (p
) && sregno
< FIRST_PSEUDO_REGISTER
2193 && find_reg_fusage (p
, USE
, src
))
2194 /* Don't change a USE of a register. */
2195 || (GET_CODE (PATTERN (p
)) == USE
2196 && reg_overlap_mentioned_p (src
, XEXP (PATTERN (p
), 0))))
2199 /* See if all of SRC dies in P. This test is slightly
2200 more conservative than it needs to be. */
2201 if ((note
= find_regno_note (p
, REG_DEAD
, sregno
))
2202 && GET_MODE (XEXP (note
, 0)) == GET_MODE (src
))
2206 /* We can do the optimization. Scan forward from INSN
2207 again, replacing regs as we go. Set FAILED if a
2208 replacement can't be done. In that case, we can't
2209 move the death note for SRC. This should be
2212 /* Set to stop at next insn. */
2213 for (q
= next_real_insn (insn
);
2214 q
!= next_real_insn (p
);
2215 q
= next_real_insn (q
))
2217 if (reg_overlap_mentioned_p (src
, PATTERN (q
)))
2219 /* If SRC is a hard register, we might miss
2220 some overlapping registers with
2221 validate_replace_rtx, so we would have to
2222 undo it. We can't if DEST is present in
2223 the insn, so fail in that combination of
2225 if (sregno
< FIRST_PSEUDO_REGISTER
2226 && reg_mentioned_p (dest
, PATTERN (q
)))
2229 /* Attempt to replace all uses. */
2230 else if (!validate_replace_rtx (src
, dest
, q
))
2233 /* If this succeeded, but some part of the
2234 register is still present, undo the
2236 else if (sregno
< FIRST_PSEUDO_REGISTER
2237 && reg_overlap_mentioned_p (src
, PATTERN (q
)))
2239 validate_replace_rtx (dest
, src
, q
);
2244 /* If DEST dies here, remove the death note and
2245 save it for later. Make sure ALL of DEST dies
2246 here; again, this is overly conservative. */
2248 && (dest_death
= find_regno_note (q
, REG_DEAD
, dregno
)))
2250 if (GET_MODE (XEXP (dest_death
, 0)) == GET_MODE (dest
))
2251 remove_note (q
, dest_death
);
2262 /* Move death note of SRC from P to INSN. */
2263 remove_note (p
, note
);
2264 XEXP (note
, 1) = REG_NOTES (insn
);
2265 REG_NOTES (insn
) = note
;
2268 /* DEST is also dead if INSN has a REG_UNUSED note for
2272 = find_regno_note (insn
, REG_UNUSED
, dregno
)))
2274 PUT_REG_NOTE_KIND (dest_death
, REG_DEAD
);
2275 remove_note (insn
, dest_death
);
2278 /* Put death note of DEST on P if we saw it die. */
2281 XEXP (dest_death
, 1) = REG_NOTES (p
);
2282 REG_NOTES (p
) = dest_death
;
2287 /* If SRC is a hard register which is set or killed in
2288 some other way, we can't do this optimization. */
2289 else if (sregno
< FIRST_PSEUDO_REGISTER
&& dead_or_set_p (p
, src
))
/* Return nonzero if REGNO is a particularly bad choice for reloading X.  */
static bool
ira_bad_reload_regno_1 (int regno, rtx x)
{
  int x_regno, n, i;
  ira_allocno_t a;
  enum reg_class pref;

  /* We only deal with pseudo regs.  */
  if (! x || GET_CODE (x) != REG)
    return false;

  x_regno = REGNO (x);
  if (x_regno < FIRST_PSEUDO_REGISTER)
    return false;

  /* If the pseudo prefers REGNO explicitly, then do not consider
     REGNO a bad spill choice.  */
  pref = reg_preferred_class (x_regno);
  if (reg_class_size[pref] == 1)
    return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);

  /* If the pseudo conflicts with REGNO, then we consider REGNO a
     poor choice for a reload regno.  */
  a = ira_regno_allocno_map[x_regno];
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
	return true;
    }
  return false;
}

/* Return nonzero if REGNO is a particularly bad choice for reloading
   IN or OUT.  */
bool
ira_bad_reload_regno (int regno, rtx in, rtx out)
{
  return (ira_bad_reload_regno_1 (regno, in)
	  || ira_bad_reload_regno_1 (regno, out));
}
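
/* Illustrative use (sketch only, hypothetical names): a spill-register
   chooser outside this file can deprioritize candidates flagged here,
   e.g.

       if (ira_bad_reload_regno (regno, in_rtx, out_rtx))
	 cost += BAD_SPILL_REGNO_PENALTY;

   where in_rtx/out_rtx are the reload's input and output RTL and
   BAD_SPILL_REGNO_PENALTY is an invented tuning constant.  */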
/* Add register clobbers from asm statements.  */
static void
compute_regs_asm_clobbered (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  df_ref def;

	  if (NONDEBUG_INSN_P (insn) && asm_noperands (PATTERN (insn)) >= 0)
	    FOR_EACH_INSN_DEF (def, insn)
	      {
		unsigned int dregno = DF_REF_REGNO (def);
		if (HARD_REGISTER_NUM_P (dregno))
		  add_to_hard_reg_set (&crtl->asm_clobbers,
				       GET_MODE (DF_REF_REAL_REG (def)),
				       dregno);
	      }
	}
    }
}
/* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE.  */
void
ira_setup_eliminable_regset (void)
{
  int i;
  static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
  int fp_reg_count = hard_regno_nregs (HARD_FRAME_POINTER_REGNUM, Pmode);

  /* Setup is_leaf as frame_pointer_required may use it.  This function
     is called by sched_init before ira if scheduling is enabled.  */
  crtl->is_leaf = leaf_function_p ();

  /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
     sp for alloca.  So we can't eliminate the frame pointer in that
     case.  At some point, we should improve this by emitting the
     sp-adjusting insns for this case.  */
  frame_pointer_needed
    = (! flag_omit_frame_pointer
       || (cfun->calls_alloca && EXIT_IGNORE_STACK)
       /* We need the frame pointer to catch stack overflow exceptions if
	  the stack pointer is moving (as for the alloca case just above).  */
       || (STACK_CHECK_MOVING_SP
	   && flag_stack_check
	   && flag_exceptions
	   && cfun->can_throw_non_call_exceptions)
       || crtl->accesses_prior_frames
       || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed)
       || targetm.frame_pointer_required ());

  /* The chance that FRAME_POINTER_NEEDED is changed from inspecting
     RTL is very small.  So if we use frame pointer for RA and RTL
     actually prevents this, we will spill pseudos assigned to the
     frame pointer in LRA.  */

  if (frame_pointer_needed)
    for (i = 0; i < fp_reg_count; i++)
      df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);

  ira_no_alloc_regs = no_unit_alloc_regs;
  CLEAR_HARD_REG_SET (eliminable_regset);

  compute_regs_asm_clobbered ();

  /* Build the regset of all eliminable registers and show we can't
     use those that we already know won't be eliminated.  */
  for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
    {
      bool cannot_elim
	= (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
	   || (eliminables[i].to == STACK_POINTER_REGNUM
	       && frame_pointer_needed));

      if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
	{
	  SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);

	  if (cannot_elim)
	    SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
	}
      else if (cannot_elim)
	error ("%s cannot be used in %<asm%> here",
	       reg_names[eliminables[i].from]);
      else
	df_set_regs_ever_live (eliminables[i].from, true);
    }
  if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
    {
      for (i = 0; i < fp_reg_count; i++)
	if (global_regs[HARD_FRAME_POINTER_REGNUM + i])
	  /* Nothing to do: the register is already treated as live
	     where appropriate, and cannot be eliminated.  */
	  ;
	else if (!TEST_HARD_REG_BIT (crtl->asm_clobbers,
				     HARD_FRAME_POINTER_REGNUM + i))
	  {
	    SET_HARD_REG_BIT (eliminable_regset,
			      HARD_FRAME_POINTER_REGNUM + i);
	    if (frame_pointer_needed)
	      SET_HARD_REG_BIT (ira_no_alloc_regs,
				HARD_FRAME_POINTER_REGNUM + i);
	  }
	else if (frame_pointer_needed)
	  error ("%s cannot be used in %<asm%> here",
		 reg_names[HARD_FRAME_POINTER_REGNUM + i]);
	else
	  df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
    }
}
/* Vector of substitutions of register numbers,
   used to map pseudo regs into hardware regs.
   This is set up as a result of register allocation.
   Element N is the hard reg assigned to pseudo reg N,
   or is -1 if no hard reg was assigned.
   If N is a hard reg number, element N is N.  */
short *reg_renumber;

/* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
   the allocation found by IRA.  */
static void
setup_reg_renumber (void)
{
  int regno, hard_regno;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  caller_save_needed = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      /* There are no caps at this point.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      if (! ALLOCNO_ASSIGNED_P (a))
	/* It can happen if A is not referenced but partially anticipated
	   somewhere in a region.  */
	ALLOCNO_ASSIGNED_P (a) = true;
      ira_free_allocno_updated_costs (a);
      hard_regno = ALLOCNO_HARD_REGNO (a);
      regno = ALLOCNO_REGNO (a);
      reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
      if (hard_regno >= 0)
	{
	  int i, nwords;
	  enum reg_class pclass;
	  ira_object_t obj;

	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
	  nwords = ALLOCNO_NUM_OBJECTS (a);
	  for (i = 0; i < nwords; i++)
	    {
	      obj = ALLOCNO_OBJECT (a, i);
	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
		|= ~reg_class_contents[pclass];
	    }
	  if (ira_need_caller_save_p (a, hard_regno))
	    {
	      ira_assert (!optimize || flag_caller_saves
			  || (ALLOCNO_CALLS_CROSSED_NUM (a)
			      == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
			  || regno >= ira_reg_equiv_len
			  || ira_equiv_no_lvalue_p (regno));
	      caller_save_needed = 1;
	    }
	}
    }
}
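
/* Illustrative use (sketch only, not from the original file): later passes
   map a pseudo-register RTX X to its assigned hard register (if any) with

       int hard_regno = reg_renumber[REGNO (x)];
       if (hard_regno >= 0)
	 ... the pseudo lives in hard register hard_regno ...
       else
	 ... the pseudo was spilled to memory ...
*/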
/* Set up allocno assignment flags for further allocation
   improvements.  */
static void
setup_allocno_assignment_flags (void)
{
  int hard_regno;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (! ALLOCNO_ASSIGNED_P (a))
	/* It can happen if A is not referenced but partially anticipated
	   somewhere in a region.  */
	ira_free_allocno_updated_costs (a);
      hard_regno = ALLOCNO_HARD_REGNO (a);
      /* Don't assign hard registers to allocnos which are destination
	 of removed store at the end of loop.  It has no sense to keep
	 the same value in different hard registers.  It is also
	 impossible to assign hard registers correctly to such
	 allocnos because the cost info and info about intersected
	 calls are incorrect for them.  */
      ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
				|| ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
				|| (ALLOCNO_MEMORY_COST (a)
				    - ALLOCNO_CLASS_COST (a)) < 0);
      ira_assert (hard_regno < 0
		  || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
					    reg_class_contents
					    [ALLOCNO_CLASS (a)]));
    }
}
/* Evaluate overall allocation cost and the costs for using hard
   registers and memory for allocnos.  */
static void
calculate_allocation_cost (void)
{
  int hard_regno, cost;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      hard_regno = ALLOCNO_HARD_REGNO (a);
      ira_assert (hard_regno < 0
		  || (ira_hard_reg_in_set_p
		      (hard_regno, ALLOCNO_MODE (a),
		       reg_class_contents[ALLOCNO_CLASS (a)])));
      if (hard_regno < 0)
	{
	  cost = ALLOCNO_MEMORY_COST (a);
	  ira_mem_cost += cost;
	}
      else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
	{
	  cost = (ALLOCNO_HARD_REG_COSTS (a)
		  [ira_class_hard_reg_index
		   [ALLOCNO_CLASS (a)][hard_regno]]);
	  ira_reg_cost += cost;
	}
      else
	{
	  cost = ALLOCNO_CLASS_COST (a);
	  ira_reg_cost += cost;
	}
      ira_overall_cost += cost;
    }

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file,
	       "+++Costs: overall %" PRId64
	       ", reg %" PRId64
	       ", mem %" PRId64
	       ", ld %" PRId64
	       ", st %" PRId64
	       ", move %" PRId64,
	       ira_overall_cost, ira_reg_cost, ira_mem_cost,
	       ira_load_cost, ira_store_cost, ira_shuffle_cost);
      fprintf (ira_dump_file, "\n+++       move loops %d, new jumps %d\n",
	       ira_move_loops_num, ira_additional_jumps_num);
    }
}
#ifdef ENABLE_IRA_CHECKING
/* Check the correctness of the allocation.  We do need this because
   of complicated code to transform more one region internal
   representation into one region representation.  */
static void
check_allocation (void)
{
  ira_allocno_t a;
  int hard_regno, nregs, conflict_nregs;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      int i;

      if (ALLOCNO_CAP_MEMBER (a) != NULL
	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
	continue;
      nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
      if (nregs == 1)
	/* We allocated a single hard register.  */
	n = 1;
      else if (n > 1)
	/* We allocated multiple hard registers, and we will test
	   conflicts in a granularity of single hard regs.  */
	nregs = 1;

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;
	  int this_regno = hard_regno;
	  if (n > 1)
	    {
	      if (REG_WORDS_BIG_ENDIAN)
		this_regno += n - i - 1;
	      else
		this_regno += i;
	    }
	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	      int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
	      if (conflict_hard_regno < 0)
		continue;

	      conflict_nregs = hard_regno_nregs (conflict_hard_regno,
						 ALLOCNO_MODE (conflict_a));

	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
		  && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
		{
		  if (REG_WORDS_BIG_ENDIAN)
		    conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
					    - OBJECT_SUBWORD (conflict_obj)
					    - 1);
		  else
		    conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
		  conflict_nregs = 1;
		}

	      if ((conflict_hard_regno <= this_regno
		   && this_regno < conflict_hard_regno + conflict_nregs)
		  || (this_regno <= conflict_hard_regno
		      && conflict_hard_regno < this_regno + nregs))
		{
		  fprintf (stderr, "bad allocation for %d and %d\n",
			   ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
		  gcc_unreachable ();
		}
	    }
	}
    }
}
#endif
/* Allocate REG_EQUIV_INIT.  Set up it from IRA_REG_EQUIV which should
   be already calculated.  */
static void
setup_reg_equiv_init (void)
{
  int i;
  int max_regno = max_reg_num ();

  for (i = 0; i < max_regno; i++)
    reg_equiv_init (i) = ira_reg_equiv[i].init_insns;
}
2694 /* Update equiv regno from movement of FROM_REGNO to TO_REGNO. INSNS
2695 are insns which were generated for such movement. It is assumed
2696 that FROM_REGNO and TO_REGNO always have the same value at the
2697 point of any move containing such registers. This function is used
2698 to update equiv info for register shuffles on the region borders
2699 and for caller save/restore insns. */
2701 ira_update_equiv_info_by_shuffle_insn (int to_regno
, int from_regno
, rtx_insn
*insns
)
2706 if (! ira_reg_equiv
[from_regno
].defined_p
2707 && (! ira_reg_equiv
[to_regno
].defined_p
2708 || ((x
= ira_reg_equiv
[to_regno
].memory
) != NULL_RTX
2709 && ! MEM_READONLY_P (x
))))
2712 if (NEXT_INSN (insn
) != NULL_RTX
)
2714 if (! ira_reg_equiv
[to_regno
].defined_p
)
2716 ira_assert (ira_reg_equiv
[to_regno
].init_insns
== NULL_RTX
);
2719 ira_reg_equiv
[to_regno
].defined_p
= false;
2720 ira_reg_equiv
[to_regno
].memory
2721 = ira_reg_equiv
[to_regno
].constant
2722 = ira_reg_equiv
[to_regno
].invariant
2723 = ira_reg_equiv
[to_regno
].init_insns
= NULL
;
2724 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2725 fprintf (ira_dump_file
,
2726 " Invalidating equiv info for reg %d\n", to_regno
);
2729 /* It is possible that FROM_REGNO still has no equivalence because
2730 in shuffles to_regno<-from_regno and from_regno<-to_regno the 2nd
2731 insn was not processed yet. */
2732 if (ira_reg_equiv
[from_regno
].defined_p
)
2734 ira_reg_equiv
[to_regno
].defined_p
= true;
2735 if ((x
= ira_reg_equiv
[from_regno
].memory
) != NULL_RTX
)
2737 ira_assert (ira_reg_equiv
[from_regno
].invariant
== NULL_RTX
2738 && ira_reg_equiv
[from_regno
].constant
== NULL_RTX
);
2739 ira_assert (ira_reg_equiv
[to_regno
].memory
== NULL_RTX
2740 || rtx_equal_p (ira_reg_equiv
[to_regno
].memory
, x
));
2741 ira_reg_equiv
[to_regno
].memory
= x
;
2742 if (! MEM_READONLY_P (x
))
2743 /* We don't add the insn to insn init list because memory
2744 equivalence is just to say what memory is better to use
2745 when the pseudo is spilled. */
2748 else if ((x
= ira_reg_equiv
[from_regno
].constant
) != NULL_RTX
)
2750 ira_assert (ira_reg_equiv
[from_regno
].invariant
== NULL_RTX
);
2751 ira_assert (ira_reg_equiv
[to_regno
].constant
== NULL_RTX
2752 || rtx_equal_p (ira_reg_equiv
[to_regno
].constant
, x
));
2753 ira_reg_equiv
[to_regno
].constant
= x
;
2757 x
= ira_reg_equiv
[from_regno
].invariant
;
2758 ira_assert (x
!= NULL_RTX
);
2759 ira_assert (ira_reg_equiv
[to_regno
].invariant
== NULL_RTX
2760 || rtx_equal_p (ira_reg_equiv
[to_regno
].invariant
, x
));
2761 ira_reg_equiv
[to_regno
].invariant
= x
;
2763 if (find_reg_note (insn
, REG_EQUIV
, x
) == NULL_RTX
)
2765 note
= set_unique_reg_note (insn
, REG_EQUIV
, copy_rtx (x
));
2766 gcc_assert (note
!= NULL_RTX
);
2767 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2769 fprintf (ira_dump_file
,
2770 " Adding equiv note to insn %u for reg %d ",
2771 INSN_UID (insn
), to_regno
);
2772 dump_value_slim (ira_dump_file
, x
, 1);
2773 fprintf (ira_dump_file
, "\n");
2777 ira_reg_equiv
[to_regno
].init_insns
2778 = gen_rtx_INSN_LIST (VOIDmode
, insn
,
2779 ira_reg_equiv
[to_regno
].init_insns
);
2780 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2781 fprintf (ira_dump_file
,
2782 " Adding equiv init move insn %u to reg %d\n",
2783 INSN_UID (insn
), to_regno
);
2786 /* Fix values of array REG_EQUIV_INIT after live range splitting done
2789 fix_reg_equiv_init (void)
2791 int max_regno
= max_reg_num ();
2792 int i
, new_regno
, max
;
2794 rtx_insn_list
*x
, *next
, *prev
;
2797 if (max_regno_before_ira
< max_regno
)
2799 max
= vec_safe_length (reg_equivs
);
2801 for (i
= FIRST_PSEUDO_REGISTER
; i
< max
; i
++)
2802 for (prev
= NULL
, x
= reg_equiv_init (i
);
2808 set
= single_set (insn
);
2809 ira_assert (set
!= NULL_RTX
2810 && (REG_P (SET_DEST (set
)) || REG_P (SET_SRC (set
))));
2811 if (REG_P (SET_DEST (set
))
2812 && ((int) REGNO (SET_DEST (set
)) == i
2813 || (int) ORIGINAL_REGNO (SET_DEST (set
)) == i
))
2814 new_regno
= REGNO (SET_DEST (set
));
2815 else if (REG_P (SET_SRC (set
))
2816 && ((int) REGNO (SET_SRC (set
)) == i
2817 || (int) ORIGINAL_REGNO (SET_SRC (set
)) == i
))
2818 new_regno
= REGNO (SET_SRC (set
));
2825 /* Remove the wrong list element. */
2826 if (prev
== NULL_RTX
)
2827 reg_equiv_init (i
) = next
;
2829 XEXP (prev
, 1) = next
;
2830 XEXP (x
, 1) = reg_equiv_init (new_regno
);
2831 reg_equiv_init (new_regno
) = x
;
#ifdef ENABLE_IRA_CHECKING
/* Print redundant memory-memory copies.  */
static void
print_redundant_copies (void)
{
  int hard_regno;
  ira_allocno_t a;
  ira_copy_t cp, next_cp;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* It is a cap.  */
	continue;
      hard_regno = ALLOCNO_HARD_REGNO (a);
      if (hard_regno >= 0)
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	if (cp->first == a)
	  next_cp = cp->next_first_allocno_copy;
	else
	  {
	    next_cp = cp->next_second_allocno_copy;
	    if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
		&& cp->insn != NULL_RTX
		&& ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
	      fprintf (ira_dump_file,
		       "        Redundant move from %d(freq %d):%d\n",
		       INSN_UID (cp->insn), cp->freq, hard_regno);
	  }
    }
}
#endif
/* Setup preferred and alternative classes for new pseudo-registers
   created by IRA starting with START.  */
static void
setup_preferred_alternate_classes_for_new_pseudos (int start)
{
  int i, old_regno;
  int max_regno = max_reg_num ();

  for (i = start; i < max_regno; i++)
    {
      old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
      ira_assert (i != old_regno);
      setup_reg_classes (i, reg_preferred_class (old_regno),
			 reg_alternate_class (old_regno),
			 reg_allocno_class (old_regno));
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 "    New r%d: setting preferred %s, alternative %s\n",
		 i, reg_class_names[reg_preferred_class (old_regno)],
		 reg_class_names[reg_alternate_class (old_regno)]);
    }
}
/* The number of entries allocated in reg_info.  */
static int allocated_reg_info_size;

/* Regional allocation can create new pseudo-registers.  This function
   expands some arrays for pseudo-registers.  */
static void
expand_reg_info (void)
{
  int i;
  int size = max_reg_num ();

  resize_reg_info ();
  for (i = allocated_reg_info_size; i < size; i++)
    setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
  setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size);
  allocated_reg_info_size = size;
}
/* Return TRUE if there is too high register pressure in the function.
   It is used to decide when stack slot sharing is worth to do.  */
static bool
too_high_register_pressure_p (void)
{
  int i;
  enum reg_class pclass;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
	return true;
    }
  return false;
}
/* Indicate that hard register number FROM was eliminated and replaced with
   an offset from hard register number TO.  The status of hard registers live
   at the start of a basic block is updated by replacing a use of FROM with
   a use of TO.  */

void
mark_elimination (int from, int to)
{
  basic_block bb;
  bitmap r;

  FOR_EACH_BB_FN (bb, cfun)
    {
      r = DF_LR_IN (bb);
      if (bitmap_bit_p (r, from))
	{
	  bitmap_clear_bit (r, from);
	  bitmap_set_bit (r, to);
	}
      if (! df_live)
	continue;
      r = DF_LIVE_IN (bb);
      if (bitmap_bit_p (r, from))
	{
	  bitmap_clear_bit (r, from);
	  bitmap_set_bit (r, to);
	}
    }
}
/* The length of the following array.  */
int ira_reg_equiv_len;

/* Info about equiv. info for each register.  */
struct ira_reg_equiv_s *ira_reg_equiv;

/* Expand ira_reg_equiv if necessary.  */
void
ira_expand_reg_equiv (void)
{
  int old = ira_reg_equiv_len;

  if (ira_reg_equiv_len > max_reg_num ())
    return;
  ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1;
  ira_reg_equiv
    = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv,
					   ira_reg_equiv_len
					   * sizeof (struct ira_reg_equiv_s));
  gcc_assert (old < ira_reg_equiv_len);
  memset (ira_reg_equiv + old, 0,
	  sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old));
}

static void
init_reg_equiv (void)
{
  ira_reg_equiv_len = 0;
  ira_reg_equiv = NULL;
  ira_expand_reg_equiv ();
}

static void
finish_reg_equiv (void)
{
  free (ira_reg_equiv);
}
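
/* Illustrative use (sketch only, not from the original file): code that
   creates new pseudos and wants to record equivalence info first grows
   the array, then fills the slot for the new register:

       ira_expand_reg_equiv ();
       ira_reg_equiv[REGNO (new_reg)].init_insns = NULL;

   where new_reg stands for the freshly created pseudo.  */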
3007 /* Set when a REG_EQUIV note is found or created. Use to
3008 keep track of what memory accesses might be created later,
3013 /* The list of each instruction which initializes this register.
3015 NULL indicates we know nothing about this register's equivalence
3018 An INSN_LIST with a NULL insn indicates this pseudo is already
3019 known to not have a valid equivalence. */
3020 rtx_insn_list
*init_insns
;
3022 /* Loop depth is used to recognize equivalences which appear
3023 to be present within the same loop (or in an inner loop). */
3025 /* Nonzero if this had a preexisting REG_EQUIV note. */
3026 unsigned char is_arg_equivalence
: 1;
3027 /* Set when an attempt should be made to replace a register
3028 with the associated src_p entry. */
3029 unsigned char replace
: 1;
3030 /* Set if this register has no known equivalence. */
3031 unsigned char no_equiv
: 1;
3032 /* Set if this register is mentioned in a paradoxical subreg. */
3033 unsigned char pdx_subregs
: 1;
3036 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
3037 structure for that register. */
3038 static struct equivalence
*reg_equiv
;
3040 /* Used for communication between the following two functions. */
3041 struct equiv_mem_data
3043 /* A MEM that we wish to ensure remains unchanged. */
3046 /* Set true if EQUIV_MEM is modified. */
3047 bool equiv_mem_modified
;
3050 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
3051 Called via note_stores. */
3053 validate_equiv_mem_from_store (rtx dest
, const_rtx set ATTRIBUTE_UNUSED
,
3056 struct equiv_mem_data
*info
= (struct equiv_mem_data
*) data
;
3059 && reg_overlap_mentioned_p (dest
, info
->equiv_mem
))
3061 && anti_dependence (info
->equiv_mem
, dest
)))
3062 info
->equiv_mem_modified
= true;
3065 enum valid_equiv
{ valid_none
, valid_combine
, valid_reload
};
3067 /* Verify that no store between START and the death of REG invalidates
3068 MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
3069 by storing into an overlapping memory location, or with a non-const
3072 Return VALID_RELOAD if MEMREF remains valid for both reload and
3073 combine_and_move insns, VALID_COMBINE if only valid for
3074 combine_and_move_insns, and VALID_NONE otherwise. */
3075 static enum valid_equiv
3076 validate_equiv_mem (rtx_insn
*start
, rtx reg
, rtx memref
)
3080 struct equiv_mem_data info
= { memref
, false };
3081 enum valid_equiv ret
= valid_reload
;
3083 /* If the memory reference has side effects or is volatile, it isn't a
3084 valid equivalence. */
3085 if (side_effects_p (memref
))
3088 for (insn
= start
; insn
; insn
= NEXT_INSN (insn
))
3093 if (find_reg_note (insn
, REG_DEAD
, reg
))
3098 /* We can combine a reg def from one insn into a reg use in
3099 another over a call if the memory is readonly or the call
3100 const/pure. However, we can't set reg_equiv notes up for
3101 reload over any call. The problem is the equivalent form
3102 may reference a pseudo which gets assigned a call
3103 clobbered hard reg. When we later replace REG with its
3104 equivalent form, the value in the call-clobbered reg has
3105 been changed and all hell breaks loose. */
3106 ret
= valid_combine
;
3107 if (!MEM_READONLY_P (memref
)
3108 && !RTL_CONST_OR_PURE_CALL_P (insn
))
3112 note_stores (insn
, validate_equiv_mem_from_store
, &info
);
3113 if (info
.equiv_mem_modified
)
3116 /* If a register mentioned in MEMREF is modified via an
3117 auto-increment, we lose the equivalence. Do the same if one
3118 dies; although we could extend the life, it doesn't seem worth
3121 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
3122 if ((REG_NOTE_KIND (note
) == REG_INC
3123 || REG_NOTE_KIND (note
) == REG_DEAD
)
3124 && REG_P (XEXP (note
, 0))
3125 && reg_overlap_mentioned_p (XEXP (note
, 0), memref
))
3132 /* Returns zero if X is known to be invariant. */
3134 equiv_init_varies_p (rtx x
)
3136 RTX_CODE code
= GET_CODE (x
);
3143 return !MEM_READONLY_P (x
) || equiv_init_varies_p (XEXP (x
, 0));
3152 return reg_equiv
[REGNO (x
)].replace
== 0 && rtx_varies_p (x
, 0);
3155 if (MEM_VOLATILE_P (x
))
3164 fmt
= GET_RTX_FORMAT (code
);
3165 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3168 if (equiv_init_varies_p (XEXP (x
, i
)))
3171 else if (fmt
[i
] == 'E')
3174 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
3175 if (equiv_init_varies_p (XVECEXP (x
, i
, j
)))
3182 /* Returns nonzero if X (used to initialize register REGNO) is movable.
3183 X is only movable if the registers it uses have equivalent initializations
3184 which appear to be within the same loop (or in an inner loop) and movable
3185 or if they are not candidates for local_alloc and don't vary. */
3187 equiv_init_movable_p (rtx x
, int regno
)
3191 enum rtx_code code
= GET_CODE (x
);
3196 return equiv_init_movable_p (SET_SRC (x
), regno
);
3210 return ((reg_equiv
[REGNO (x
)].loop_depth
>= reg_equiv
[regno
].loop_depth
3211 && reg_equiv
[REGNO (x
)].replace
)
3212 || (REG_BASIC_BLOCK (REGNO (x
)) < NUM_FIXED_BLOCKS
3213 && ! rtx_varies_p (x
, 0)));
3215 case UNSPEC_VOLATILE
:
3219 if (MEM_VOLATILE_P (x
))
3228 fmt
= GET_RTX_FORMAT (code
);
3229 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3233 if (! equiv_init_movable_p (XEXP (x
, i
), regno
))
3237 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
3238 if (! equiv_init_movable_p (XVECEXP (x
, i
, j
), regno
))
3246 static bool memref_referenced_p (rtx memref
, rtx x
, bool read_p
);
3248 /* Auxiliary function for memref_referenced_p. Process setting X for
3251 process_set_for_memref_referenced_p (rtx memref
, rtx x
)
3253 /* If we are setting a MEM, it doesn't count (its address does), but any
3254 other SET_DEST that has a MEM in it is referencing the MEM. */
3257 if (memref_referenced_p (memref
, XEXP (x
, 0), true))
3260 else if (memref_referenced_p (memref
, x
, false))
3266 /* TRUE if X references a memory location (as a read if READ_P) that
3267 would be affected by a store to MEMREF. */
3269 memref_referenced_p (rtx memref
, rtx x
, bool read_p
)
3273 enum rtx_code code
= GET_CODE (x
);
3287 return (reg_equiv
[REGNO (x
)].replacement
3288 && memref_referenced_p (memref
,
3289 reg_equiv
[REGNO (x
)].replacement
, read_p
));
3292 /* Memory X might have another effective type than MEMREF. */
3293 if (read_p
|| true_dependence (memref
, VOIDmode
, x
))
3298 if (process_set_for_memref_referenced_p (memref
, SET_DEST (x
)))
3301 return memref_referenced_p (memref
, SET_SRC (x
), true);
3304 if (process_set_for_memref_referenced_p (memref
, XEXP (x
, 0)))
3313 if (process_set_for_memref_referenced_p (memref
, XEXP (x
, 0)))
3316 return memref_referenced_p (memref
, XEXP (x
, 0), true);
3320 /* op0 = op0 + op1 */
3321 if (process_set_for_memref_referenced_p (memref
, XEXP (x
, 0)))
3324 if (memref_referenced_p (memref
, XEXP (x
, 0), true))
3327 return memref_referenced_p (memref
, XEXP (x
, 1), true);
3333 fmt
= GET_RTX_FORMAT (code
);
3334 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3338 if (memref_referenced_p (memref
, XEXP (x
, i
), read_p
))
3342 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
3343 if (memref_referenced_p (memref
, XVECEXP (x
, i
, j
), read_p
))
3351 /* TRUE if some insn in the range (START, END] references a memory location
3352 that would be affected by a store to MEMREF.
3354 Callers should not call this routine if START is after END in the
3358 memref_used_between_p (rtx memref
, rtx_insn
*start
, rtx_insn
*end
)
3362 for (insn
= NEXT_INSN (start
);
3363 insn
&& insn
!= NEXT_INSN (end
);
3364 insn
= NEXT_INSN (insn
))
3366 if (!NONDEBUG_INSN_P (insn
))
3369 if (memref_referenced_p (memref
, PATTERN (insn
), false))
3372 /* Nonconst functions may access memory. */
3373 if (CALL_P (insn
) && (! RTL_CONST_CALL_P (insn
)))
3377 gcc_assert (insn
== NEXT_INSN (end
));
/* Mark REG as having no known equivalence.
   Some instructions might have been processed before and furnished
   with REG_EQUIV notes for this register; these notes will have to be
   removed.
   STORE is the piece of RTL that does the non-constant / conflicting
   assignment - a SET, CLOBBER or REG_INC note.  It is currently not used,
   but needs to be there because this function is called from note_stores.  */
static void
no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
	  void *data ATTRIBUTE_UNUSED)
{
  int regno;
  rtx_insn_list *list;

  if (!REG_P (reg))
    return;
  regno = REGNO (reg);
  reg_equiv[regno].no_equiv = 1;
  list = reg_equiv[regno].init_insns;
  if (list && list->insn () == NULL)
    return;
  reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, NULL_RTX, NULL);
  reg_equiv[regno].replacement = NULL_RTX;
  /* This doesn't matter for equivalences made for argument registers, we
     should keep their initialization insns.  */
  if (reg_equiv[regno].is_arg_equivalence)
    return;
  ira_reg_equiv[regno].defined_p = false;
  ira_reg_equiv[regno].init_insns = NULL;
  for (; list; list = list->next ())
    {
      rtx_insn *insn = list->insn ();
      remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
    }
}
/* Check whether the SUBREG is a paradoxical subreg and set the result
   in PDX_SUBREGS.  */
static void
set_paradoxical_subreg (rtx_insn *insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    {
      const_rtx subreg = *iter;
      if (GET_CODE (subreg) == SUBREG)
	{
	  const_rtx reg = SUBREG_REG (subreg);
	  if (REG_P (reg) && paradoxical_subreg_p (subreg))
	    reg_equiv[REGNO (reg)].pdx_subregs = true;
	}
    }
}
/* In DEBUG_INSN location adjust REGs from CLEARED_REGS bitmap to the
   equivalent replacement.  */

static rtx
adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
{
  if (REG_P (loc))
    {
      bitmap cleared_regs = (bitmap) data;
      if (bitmap_bit_p (cleared_regs, REGNO (loc)))
	return simplify_replace_fn_rtx (copy_rtx (*reg_equiv[REGNO (loc)].src_p),
					NULL_RTX, adjust_cleared_regs, data);
    }
  return NULL_RTX;
}
/* Given register REGNO is set only once, return true if the defining
   insn dominates all uses.  */

static bool
def_dominates_uses (int regno)
{
  df_ref def = DF_REG_DEF_CHAIN (regno);

  struct df_insn_info *def_info = DF_REF_INSN_INFO (def);
  /* If this is an artificial def (eh handler regs, hard frame pointer
     for non-local goto, regs defined on function entry) then def_info
     is NULL and the reg is always live before any use.  We might
     reasonably return true in that case, but since the only call
     of this function is currently here in ira.c when we are looking
     at a defining insn we can't have an artificial def as that would
     bump DF_REG_DEF_COUNT.  */
  gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && def_info != NULL);

  rtx_insn *def_insn = DF_REF_INSN (def);
  basic_block def_bb = BLOCK_FOR_INSN (def_insn);

  for (df_ref use = DF_REG_USE_CHAIN (regno);
       use;
       use = DF_REF_NEXT_REG (use))
    {
      struct df_insn_info *use_info = DF_REF_INSN_INFO (use);
      /* Only check real uses, not artificial ones.  */
      if (use_info)
	{
	  rtx_insn *use_insn = DF_REF_INSN (use);
	  if (!DEBUG_INSN_P (use_insn))
	    {
	      basic_block use_bb = BLOCK_FOR_INSN (use_insn);
	      if (use_bb != def_bb
		  ? !dominated_by_p (CDI_DOMINATORS, use_bb, def_bb)
		  : DF_INSN_INFO_LUID (use_info) < DF_INSN_INFO_LUID (def_info))
		return false;
	    }
	}
    }
  return true;
}
/* Scan the instructions before update_equiv_regs.  Record which registers
   are referenced as paradoxical subregs.  Also check for cases in which
   the current function needs to save a register that one of its call
   instructions clobbers.

   These things are logically unrelated, but it's more efficient to do
   them together.  */

static void
update_equiv_regs_prescan (void)
{
  basic_block bb;
  rtx_insn *insn;
  function_abi_aggregator callee_abis;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	{
	  set_paradoxical_subreg (insn);
	  if (CALL_P (insn))
	    callee_abis.note_callee_abi (insn_callee_abi (insn));
	}

  HARD_REG_SET extra_caller_saves = callee_abis.caller_save_regs (*crtl->abi);
  if (!hard_reg_set_empty_p (extra_caller_saves))
    for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
      if (TEST_HARD_REG_BIT (extra_caller_saves, regno))
	df_set_regs_ever_live (regno, true);
}
3526 /* Find registers that are equivalent to a single value throughout the
3527 compilation (either because they can be referenced in memory or are
3528 set once from a single constant). Lower their priority for a
3531 If such a register is only referenced once, try substituting its
3532 value into the using insn. If it succeeds, we can eliminate the
3533 register completely.
3535 Initialize init_insns in ira_reg_equiv array. */
3537 update_equiv_regs (void)
3542 /* Scan the insns and find which registers have equivalences. Do this
3543 in a separate scan of the insns because (due to -fcse-follow-jumps)
3544 a register can be set below its use. */
3545 bitmap setjmp_crosses
= regstat_get_setjmp_crosses ();
3546 FOR_EACH_BB_FN (bb
, cfun
)
3548 int loop_depth
= bb_loop_depth (bb
);
3550 for (insn
= BB_HEAD (bb
);
3551 insn
!= NEXT_INSN (BB_END (bb
));
3552 insn
= NEXT_INSN (insn
))
3559 if (! INSN_P (insn
))
3562 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
3563 if (REG_NOTE_KIND (note
) == REG_INC
)
3564 no_equiv (XEXP (note
, 0), note
, NULL
);
3566 set
= single_set (insn
);
3568 /* If this insn contains more (or less) than a single SET,
3569 only mark all destinations as having no known equivalence. */
3571 || side_effects_p (SET_SRC (set
)))
3573 note_pattern_stores (PATTERN (insn
), no_equiv
, NULL
);
3576 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3580 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
3582 rtx part
= XVECEXP (PATTERN (insn
), 0, i
);
3584 note_pattern_stores (part
, no_equiv
, NULL
);
3588 dest
= SET_DEST (set
);
3589 src
= SET_SRC (set
);
3591 /* See if this is setting up the equivalence between an argument
3592 register and its stack slot. */
3593 note
= find_reg_note (insn
, REG_EQUIV
, NULL_RTX
);
3596 gcc_assert (REG_P (dest
));
3597 regno
= REGNO (dest
);
3599 /* Note that we don't want to clear init_insns in
3600 ira_reg_equiv even if there are multiple sets of this
3602 reg_equiv
[regno
].is_arg_equivalence
= 1;
3604 /* The insn result can have equivalence memory although
3605 the equivalence is not set up by the insn. We add
3606 this insn to init insns as it is a flag for now that
3607 regno has an equivalence. We will remove the insn
3608 from init insn list later. */
3609 if (rtx_equal_p (src
, XEXP (note
, 0)) || MEM_P (XEXP (note
, 0)))
3610 ira_reg_equiv
[regno
].init_insns
3611 = gen_rtx_INSN_LIST (VOIDmode
, insn
,
3612 ira_reg_equiv
[regno
].init_insns
);
3614 /* Continue normally in case this is a candidate for
3621 /* We only handle the case of a pseudo register being set
3622 once, or always to the same value. */
3623 /* ??? The mn10200 port breaks if we add equivalences for
3624 values that need an ADDRESS_REGS register and set them equivalent
3625 to a MEM of a pseudo. The actual problem is in the over-conservative
3626 handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
3627 calculate_needs, but we traditionally work around this problem
3628 here by rejecting equivalences when the destination is in a register
3629 that's likely spilled. This is fragile, of course, since the
3630 preferred class of a pseudo depends on all instructions that set
3634 || (regno
= REGNO (dest
)) < FIRST_PSEUDO_REGISTER
3635 || (reg_equiv
[regno
].init_insns
3636 && reg_equiv
[regno
].init_insns
->insn () == NULL
)
3637 || (targetm
.class_likely_spilled_p (reg_preferred_class (regno
))
3638 && MEM_P (src
) && ! reg_equiv
[regno
].is_arg_equivalence
))
3640 /* This might be setting a SUBREG of a pseudo, a pseudo that is
3641 also set somewhere else to a constant. */
3642 note_pattern_stores (set
, no_equiv
, NULL
);
3646 /* Don't set reg mentioned in a paradoxical subreg
3647 equivalent to a mem. */
3648 if (MEM_P (src
) && reg_equiv
[regno
].pdx_subregs
)
3650 note_pattern_stores (set
, no_equiv
, NULL
);
3654 note
= find_reg_note (insn
, REG_EQUAL
, NULL_RTX
);
3656 /* cse sometimes generates function invariants, but doesn't put a
3657 REG_EQUAL note on the insn. Since this note would be redundant,
3658 there's no point creating it earlier than here. */
3659 if (! note
&& ! rtx_varies_p (src
, 0))
3660 note
= set_unique_reg_note (insn
, REG_EQUAL
, copy_rtx (src
));
3662 /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
3663 since it represents a function call. */
3664 if (note
&& GET_CODE (XEXP (note
, 0)) == EXPR_LIST
)
3667 if (DF_REG_DEF_COUNT (regno
) != 1)
3669 bool equal_p
= true;
3670 rtx_insn_list
*list
;
3672 /* If we have already processed this pseudo and determined it
3673 cannot have an equivalence, then honor that decision. */
3674 if (reg_equiv
[regno
].no_equiv
)
3678 || rtx_varies_p (XEXP (note
, 0), 0)
3679 || (reg_equiv
[regno
].replacement
3680 && ! rtx_equal_p (XEXP (note
, 0),
3681 reg_equiv
[regno
].replacement
)))
3683 no_equiv (dest
, set
, NULL
);
3687 list
= reg_equiv
[regno
].init_insns
;
3688 for (; list
; list
= list
->next ())
3693 insn_tmp
= list
->insn ();
3694 note_tmp
= find_reg_note (insn_tmp
, REG_EQUAL
, NULL_RTX
);
3695 gcc_assert (note_tmp
);
3696 if (! rtx_equal_p (XEXP (note
, 0), XEXP (note_tmp
, 0)))
3705 no_equiv (dest
, set
, NULL
);
3710 /* Record this insn as initializing this register. */
3711 reg_equiv
[regno
].init_insns
3712 = gen_rtx_INSN_LIST (VOIDmode
, insn
, reg_equiv
[regno
].init_insns
);
3714 /* If this register is known to be equal to a constant, record that
3715 it is always equivalent to the constant.
3716 Note that it is possible to have a register use before
3717 the def in loops (see gcc.c-torture/execute/pr79286.c)
3718 where the reg is undefined on first use. If the def insn
3719 won't trap we can use it as an equivalence, effectively
3720 choosing the "undefined" value for the reg to be the
3721 same as the value set by the def. */
3722 if (DF_REG_DEF_COUNT (regno
) == 1
3724 && !rtx_varies_p (XEXP (note
, 0), 0)
3725 && (!may_trap_or_fault_p (XEXP (note
, 0))
3726 || def_dominates_uses (regno
)))
3728 rtx note_value
= XEXP (note
, 0);
3729 remove_note (insn
, note
);
3730 set_unique_reg_note (insn
, REG_EQUIV
, note_value
);
3733 /* If this insn introduces a "constant" register, decrease the priority
3734 of that register. Record this insn if the register is only used once
3735 more and the equivalence value is the same as our source.
3737 The latter condition is checked for two reasons: First, it is an
3738 indication that it may be more efficient to actually emit the insn
3739 as written (if no registers are available, reload will substitute
3740 the equivalence). Secondly, it avoids problems with any registers
3741 dying in this insn whose death notes would be missed.
3743 If we don't have a REG_EQUIV note, see if this insn is loading
3744 a register used only in one basic block from a MEM. If so, and the
3745 MEM remains unchanged for the life of the register, add a REG_EQUIV
3747 note
= find_reg_note (insn
, REG_EQUIV
, NULL_RTX
);
3749 rtx replacement
= NULL_RTX
;
3751 replacement
= XEXP (note
, 0);
3752 else if (REG_BASIC_BLOCK (regno
) >= NUM_FIXED_BLOCKS
3753 && MEM_P (SET_SRC (set
)))
3755 enum valid_equiv validity
;
3756 validity
= validate_equiv_mem (insn
, dest
, SET_SRC (set
));
3757 if (validity
!= valid_none
)
3759 replacement
= copy_rtx (SET_SRC (set
));
3760 if (validity
== valid_reload
)
3761 note
= set_unique_reg_note (insn
, REG_EQUIV
, replacement
);
3765 /* If we haven't done so, record for reload that this is an
3766 equivalencing insn. */
3767 if (note
&& !reg_equiv
[regno
].is_arg_equivalence
)
3768 ira_reg_equiv
[regno
].init_insns
3769 = gen_rtx_INSN_LIST (VOIDmode
, insn
,
3770 ira_reg_equiv
[regno
].init_insns
);
3774 reg_equiv
[regno
].replacement
= replacement
;
3775 reg_equiv
[regno
].src_p
= &SET_SRC (set
);
3776 reg_equiv
[regno
].loop_depth
= (short) loop_depth
;
3778 /* Don't mess with things live during setjmp. */
3779 if (optimize
&& !bitmap_bit_p (setjmp_crosses
, regno
))
	  /* If the register is referenced exactly twice, meaning it is
	     set once and used once, indicate that the reference may be
	     replaced by the equivalence we computed above.  Do this
	     even if the register is only used in one block so that
	     dependencies can be handled where the last register is
	     used in a different block (i.e. HIGH / LO_SUM sequences)
	     and to reduce the number of registers alive across
	     calls.  */
3790 if (REG_N_REFS (regno
) == 2
3791 && (rtx_equal_p (replacement
, src
)
3792 || ! equiv_init_varies_p (src
))
3793 && NONJUMP_INSN_P (insn
)
3794 && equiv_init_movable_p (PATTERN (insn
), regno
))
3795 reg_equiv
[regno
].replace
= 1;
/* For insns that set a MEM to the contents of a REG that is only used
   in a single basic block, see if the register is always equivalent
   to that memory location and if moving the store from INSN to the
   insn that sets REG is safe.  If so, put a REG_EQUIV note on the
   initializing insn.  */

static void
add_store_equivs (void)
{
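  /* A minimal illustration (hypothetical RTL, for exposition only): given
       insn A: (set (reg:SI 100) (plus:SI ...))      -- the sole def of reg 100
       insn B: (set (mem:SI ...) (reg:SI 100))       -- its only remaining use
     if the MEM is unchanged and not referenced between A and B, we attach
     REG_EQUIV (mem:SI ...) to insn A, effectively letting the store be
     performed at the definition point.  */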
3810 auto_bitmap seen_insns
;
3812 for (rtx_insn
*insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
3816 rtx_insn
*init_insn
;
3818 bitmap_set_bit (seen_insns
, INSN_UID (insn
));
3820 if (! INSN_P (insn
))
3823 set
= single_set (insn
);
3827 dest
= SET_DEST (set
);
3828 src
= SET_SRC (set
);
3830 /* Don't add a REG_EQUIV note if the insn already has one. The existing
3831 REG_EQUIV is likely more useful than the one we are adding. */
3832 if (MEM_P (dest
) && REG_P (src
)
3833 && (regno
= REGNO (src
)) >= FIRST_PSEUDO_REGISTER
3834 && REG_BASIC_BLOCK (regno
) >= NUM_FIXED_BLOCKS
3835 && DF_REG_DEF_COUNT (regno
) == 1
3836 && ! reg_equiv
[regno
].pdx_subregs
3837 && reg_equiv
[regno
].init_insns
!= NULL
3838 && (init_insn
= reg_equiv
[regno
].init_insns
->insn ()) != 0
3839 && bitmap_bit_p (seen_insns
, INSN_UID (init_insn
))
3840 && ! find_reg_note (init_insn
, REG_EQUIV
, NULL_RTX
)
3841 && validate_equiv_mem (init_insn
, src
, dest
) == valid_reload
3842 && ! memref_used_between_p (dest
, init_insn
, insn
)
3843 /* Attaching a REG_EQUIV note will fail if INIT_INSN has
3845 && set_unique_reg_note (init_insn
, REG_EQUIV
, copy_rtx (dest
)))
	/* This insn makes the equivalence, not the one initializing
	   the register.  */
3849 ira_reg_equiv
[regno
].init_insns
3850 = gen_rtx_INSN_LIST (VOIDmode
, insn
, NULL_RTX
);
3851 df_notes_rescan (init_insn
);
3854 "Adding REG_EQUIV to insn %d for source of insn %d\n",
3855 INSN_UID (init_insn
),
/* Scan all regs killed in an insn to see if any of them are registers
   only used that once.  If so, see if we can replace the reference
   with the equivalent form.  If we can, delete the initializing
   reference and this register will go away.  If we can't replace the
   reference, and the initializing reference is within the same loop
   (or in an inner loop), then move the register initialization just
   before the use, so that they are in the same basic block.  */

static void
combine_and_move_insns (void)
{
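  /* Sketch of the two outcomes handled below (illustrative only): with a
     def (set (reg 100) (expr)) and a single non-debug use U, either the use
     can be rewritten to use (expr) directly, in which case the def is
     deleted and reg 100 disappears, or, failing that, the def insn is moved
     to immediately before U so both end up in the same basic block.  */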
3871 auto_bitmap cleared_regs
;
3872 int max
= max_reg_num ();
3874 for (int regno
= FIRST_PSEUDO_REGISTER
; regno
< max
; regno
++)
3876 if (!reg_equiv
[regno
].replace
)
3879 rtx_insn
*use_insn
= 0;
3880 for (df_ref use
= DF_REG_USE_CHAIN (regno
);
3882 use
= DF_REF_NEXT_REG (use
))
3883 if (DF_REF_INSN_INFO (use
))
3885 if (DEBUG_INSN_P (DF_REF_INSN (use
)))
3887 gcc_assert (!use_insn
);
3888 use_insn
= DF_REF_INSN (use
);
3890 gcc_assert (use_insn
);
3892 /* Don't substitute into jumps. indirect_jump_optimize does
3893 this for anything we are prepared to handle. */
3894 if (JUMP_P (use_insn
))
3897 /* Also don't substitute into a conditional trap insn -- it can become
3898 an unconditional trap, and that is a flow control insn. */
3899 if (GET_CODE (PATTERN (use_insn
)) == TRAP_IF
)
3902 df_ref def
= DF_REG_DEF_CHAIN (regno
);
3903 gcc_assert (DF_REG_DEF_COUNT (regno
) == 1 && DF_REF_INSN_INFO (def
));
3904 rtx_insn
*def_insn
= DF_REF_INSN (def
);
3906 /* We may not move instructions that can throw, since that
3907 changes basic block boundaries and we are not prepared to
3908 adjust the CFG to match. */
3909 if (can_throw_internal (def_insn
))
3912 /* Instructions with multiple sets can only be moved if DF analysis is
3913 performed for all of the registers set. See PR91052. */
3914 if (multiple_sets (def_insn
))
3917 basic_block use_bb
= BLOCK_FOR_INSN (use_insn
);
3918 basic_block def_bb
= BLOCK_FOR_INSN (def_insn
);
3919 if (bb_loop_depth (use_bb
) > bb_loop_depth (def_bb
))
3922 if (asm_noperands (PATTERN (def_insn
)) < 0
3923 && validate_replace_rtx (regno_reg_rtx
[regno
],
3924 *reg_equiv
[regno
].src_p
, use_insn
))
3927 /* Append the REG_DEAD notes from def_insn. */
	  for (rtx *p = &REG_NOTES (def_insn); (link = *p) != 0; )
3930 if (REG_NOTE_KIND (XEXP (link
, 0)) == REG_DEAD
)
3932 *p
= XEXP (link
, 1);
3933 XEXP (link
, 1) = REG_NOTES (use_insn
);
3934 REG_NOTES (use_insn
) = link
;
3937 p
= &XEXP (link
, 1);
3940 remove_death (regno
, use_insn
);
3941 SET_REG_N_REFS (regno
, 0);
3942 REG_FREQ (regno
) = 0;
3944 FOR_EACH_INSN_USE (use
, def_insn
)
3946 unsigned int use_regno
= DF_REF_REGNO (use
);
3947 if (!HARD_REGISTER_NUM_P (use_regno
))
3948 reg_equiv
[use_regno
].replace
= 0;
3951 delete_insn (def_insn
);
3953 reg_equiv
[regno
].init_insns
= NULL
;
3954 ira_reg_equiv
[regno
].init_insns
= NULL
;
3955 bitmap_set_bit (cleared_regs
, regno
);
3958 /* Move the initialization of the register to just before
3959 USE_INSN. Update the flow information. */
3960 else if (prev_nondebug_insn (use_insn
) != def_insn
)
3964 new_insn
= emit_insn_before (PATTERN (def_insn
), use_insn
);
3965 REG_NOTES (new_insn
) = REG_NOTES (def_insn
);
3966 REG_NOTES (def_insn
) = 0;
3967 /* Rescan it to process the notes. */
3968 df_insn_rescan (new_insn
);
3970 /* Make sure this insn is recognized before reload begins,
3971 otherwise eliminate_regs_in_insn will die. */
3972 INSN_CODE (new_insn
) = INSN_CODE (def_insn
);
3974 delete_insn (def_insn
);
3976 XEXP (reg_equiv
[regno
].init_insns
, 0) = new_insn
;
3978 REG_BASIC_BLOCK (regno
) = use_bb
->index
;
3979 REG_N_CALLS_CROSSED (regno
) = 0;
3981 if (use_insn
== BB_HEAD (use_bb
))
3982 BB_HEAD (use_bb
) = new_insn
;
3984 /* We know regno dies in use_insn, but inside a loop
3985 REG_DEAD notes might be missing when def_insn was in
3986 another basic block. However, when we move def_insn into
3987 this bb we'll definitely get a REG_DEAD note and reload
3988 will see the death. It's possible that update_equiv_regs
3989 set up an equivalence referencing regno for a reg set by
3990 use_insn, when regno was seen as non-local. Now that
3991 regno is local to this block, and dies, such an
3992 equivalence is invalid. */
3993 if (find_reg_note (use_insn
, REG_EQUIV
, regno_reg_rtx
[regno
]))
3995 rtx set
= single_set (use_insn
);
3996 if (set
&& REG_P (SET_DEST (set
)))
3997 no_equiv (SET_DEST (set
), set
, NULL
);
4000 ira_reg_equiv
[regno
].init_insns
4001 = gen_rtx_INSN_LIST (VOIDmode
, new_insn
, NULL_RTX
);
4002 bitmap_set_bit (cleared_regs
, regno
);
4006 if (!bitmap_empty_p (cleared_regs
))
4010 FOR_EACH_BB_FN (bb
, cfun
)
4012 bitmap_and_compl_into (DF_LR_IN (bb
), cleared_regs
);
4013 bitmap_and_compl_into (DF_LR_OUT (bb
), cleared_regs
);
4016 bitmap_and_compl_into (DF_LIVE_IN (bb
), cleared_regs
);
4017 bitmap_and_compl_into (DF_LIVE_OUT (bb
), cleared_regs
);
4020 /* Last pass - adjust debug insns referencing cleared regs. */
4021 if (MAY_HAVE_DEBUG_BIND_INSNS
)
4022 for (rtx_insn
*insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
4023 if (DEBUG_BIND_INSN_P (insn
))
4025 rtx old_loc
= INSN_VAR_LOCATION_LOC (insn
);
4026 INSN_VAR_LOCATION_LOC (insn
)
4027 = simplify_replace_fn_rtx (old_loc
, NULL_RTX
,
4028 adjust_cleared_regs
,
4029 (void *) cleared_regs
);
4030 if (old_loc
!= INSN_VAR_LOCATION_LOC (insn
))
4031 df_insn_rescan (insn
);
/* A pass over indirect jumps, converting simple cases to direct jumps.
   Combine does this optimization too, but only within a basic block.  */

static void
indirect_jump_optimize (void)
{
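  /* Illustrative example: if a block ends in (set (pc) (reg 100)) and the
     only def of reg 100 is (set (reg 100) (label_ref L)) (or that def
     carries a REG_EQUAL note with a LABEL_REF), the jump is rewritten to
     (set (pc) (label_ref L)) and jump labels are rebuilt afterwards.  */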
4042 bool rebuild_p
= false;
4044 FOR_EACH_BB_REVERSE_FN (bb
, cfun
)
4046 rtx_insn
*insn
= BB_END (bb
);
4048 || find_reg_note (insn
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
4051 rtx x
= pc_set (insn
);
4052 if (!x
|| !REG_P (SET_SRC (x
)))
4055 int regno
= REGNO (SET_SRC (x
));
4056 if (DF_REG_DEF_COUNT (regno
) == 1)
4058 df_ref def
= DF_REG_DEF_CHAIN (regno
);
4059 if (!DF_REF_IS_ARTIFICIAL (def
))
4061 rtx_insn
*def_insn
= DF_REF_INSN (def
);
4063 rtx set
= single_set (def_insn
);
4064 if (set
&& GET_CODE (SET_SRC (set
)) == LABEL_REF
)
4065 lab
= SET_SRC (set
);
4068 rtx eqnote
= find_reg_note (def_insn
, REG_EQUAL
, NULL_RTX
);
4069 if (eqnote
&& GET_CODE (XEXP (eqnote
, 0)) == LABEL_REF
)
4070 lab
= XEXP (eqnote
, 0);
4072 if (lab
&& validate_replace_rtx (SET_SRC (x
), lab
, insn
))
4080 timevar_push (TV_JUMP
);
4081 rebuild_jump_labels (get_insns ());
4082 if (purge_all_dead_edges ())
4083 delete_unreachable_blocks ();
4084 timevar_pop (TV_JUMP
);
/* Set up fields memory, constant, and invariant from init_insns in
   the structures of array ira_reg_equiv.  */

static void
setup_reg_equiv (void)
{
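  /* Illustrative summary of the classification done below: for a pseudo I
     whose equivalencing insn carries a REG_EQUIV note with value X, a
     legitimate MEM goes to ira_reg_equiv[i].memory, a PLUS of the frame or
     arg pointer with a constant goes to .invariant, a target-legitimate
     constant goes to .constant, and anything else is either forced into the
     constant pool or the equivalence is dropped.  */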
4094 rtx_insn_list
*elem
, *prev_elem
, *next_elem
;
4098 for (i
= FIRST_PSEUDO_REGISTER
; i
< ira_reg_equiv_len
; i
++)
4099 for (prev_elem
= NULL
, elem
= ira_reg_equiv
[i
].init_insns
;
4101 prev_elem
= elem
, elem
= next_elem
)
4103 next_elem
= elem
->next ();
4104 insn
= elem
->insn ();
4105 set
= single_set (insn
);
4107 /* Init insns can set up equivalence when the reg is a destination or
4108 a source (in this case the destination is memory). */
4109 if (set
!= 0 && (REG_P (SET_DEST (set
)) || REG_P (SET_SRC (set
))))
4111 if ((x
= find_reg_note (insn
, REG_EQUIV
, NULL_RTX
)) != NULL
)
4114 if (REG_P (SET_DEST (set
))
4115 && REGNO (SET_DEST (set
)) == (unsigned int) i
4116 && ! rtx_equal_p (SET_SRC (set
), x
) && MEM_P (x
))
		  /* This insn reports the equivalence but does not
		     actually set it.  Remove it from the init insns
		     list.  */
		  if (prev_elem == NULL)
4122 ira_reg_equiv
[i
].init_insns
= next_elem
;
4124 XEXP (prev_elem
, 1) = next_elem
;
4128 else if (REG_P (SET_DEST (set
))
4129 && REGNO (SET_DEST (set
)) == (unsigned int) i
)
4133 gcc_assert (REG_P (SET_SRC (set
))
4134 && REGNO (SET_SRC (set
)) == (unsigned int) i
);
4137 if (! function_invariant_p (x
)
4139 /* A function invariant is often CONSTANT_P but may
4140 include a register. We promise to only pass
4141 CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P. */
4142 || (CONSTANT_P (x
) && LEGITIMATE_PIC_OPERAND_P (x
)))
4144 /* It can happen that a REG_EQUIV note contains a MEM
4145 that is not a legitimate memory operand. As later
4146 stages of reload assume that all addresses found in
4147 the lra_regno_equiv_* arrays were originally
4148 legitimate, we ignore such REG_EQUIV notes. */
4149 if (memory_operand (x
, VOIDmode
))
4151 ira_reg_equiv
[i
].defined_p
= true;
4152 ira_reg_equiv
[i
].memory
= x
;
4155 else if (function_invariant_p (x
))
4159 mode
= GET_MODE (SET_DEST (set
));
4160 if (GET_CODE (x
) == PLUS
4161 || x
== frame_pointer_rtx
|| x
== arg_pointer_rtx
)
4162 /* This is PLUS of frame pointer and a constant,
4164 ira_reg_equiv
[i
].invariant
= x
;
4165 else if (targetm
.legitimate_constant_p (mode
, x
))
4166 ira_reg_equiv
[i
].constant
= x
;
4169 ira_reg_equiv
[i
].memory
= force_const_mem (mode
, x
);
4170 if (ira_reg_equiv
[i
].memory
== NULL_RTX
)
4172 ira_reg_equiv
[i
].defined_p
= false;
4173 ira_reg_equiv
[i
].init_insns
= NULL
;
4177 ira_reg_equiv
[i
].defined_p
= true;
4182 ira_reg_equiv
[i
].defined_p
= false;
4183 ira_reg_equiv
[i
].init_insns
= NULL
;
/* Print chain C to FILE.  */
static void
print_insn_chain (FILE *file, class insn_chain *c)
{
  fprintf (file, "insn=%d, ", INSN_UID (c->insn));
  bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
  bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
}
/* Print all reload_insn_chains to FILE.  */
static void
print_insn_chains (FILE *file)
{
  class insn_chain *c;
  for (c = reload_insn_chain; c; c = c->next)
    print_insn_chain (file, c);
}
/* Return true if pseudo REGNO should be added to set live_throughout
   or dead_or_set of the insn chains for reload consideration.  */
static bool
pseudo_for_reload_consideration_p (int regno)
{
  /* Consider spilled pseudos too for IRA because they still have a
     chance to get hard-registers in the reload when IRA is used.  */
  return (reg_renumber[regno] >= 0 || ira_conflicts_p);
}
/* Return true if we can track the individual bytes of subreg X.
   When returning true, set *OUTER_SIZE to the number of bytes in
   X itself, *INNER_SIZE to the number of bytes in the inner register
   and *START to the offset of the first byte.  */
static bool
get_subreg_tracking_sizes (rtx x, HOST_WIDE_INT *outer_size,
			   HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start)
{
  rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))];
  return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size)
	  && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size)
	  && SUBREG_BYTE (x).is_constant (start));
}
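/* For instance (assuming constant-sized modes), for (subreg:SI (reg:DI 100) 4)
   this sets *OUTER_SIZE to 4, *INNER_SIZE to 8 and *START to 4, so liveness
   can be tracked for just bytes 4..7 of the 8-byte pseudo.  */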
/* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for
   a register with SIZE bytes, making the register live if INIT_VALUE.  */
static void
init_live_subregs (bool init_value, sbitmap *live_subregs,
		   bitmap live_subregs_used, int allocnum, int size)
{
  gcc_assert (size > 0);

  /* Been there, done that.  */
  if (bitmap_bit_p (live_subregs_used, allocnum))
    return;

  /* Create a new one.  */
  if (live_subregs[allocnum] == NULL)
    live_subregs[allocnum] = sbitmap_alloc (size);

  /* If the entire reg was live before blasting into subregs, we need
     to init all of the subregs to ones else init to 0.  */
  if (init_value)
    bitmap_ones (live_subregs[allocnum]);
  else
    bitmap_clear (live_subregs[allocnum]);

  bitmap_set_bit (live_subregs_used, allocnum);
}
/* Walk the insns of the current function and build reload_insn_chain,
   and record register life information.  */
static void
build_insn_chain (void)
{
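  /* Background note: each insn_chain element describes one insn for reload,
     carrying the set of pseudos live throughout the insn (live_throughout)
     and those that die or are set in it (dead_or_set); reload walks this
     chain instead of re-deriving liveness itself.  */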
4265 class insn_chain
**p
= &reload_insn_chain
;
4267 class insn_chain
*c
= NULL
;
4268 class insn_chain
*next
= NULL
;
4269 auto_bitmap live_relevant_regs
;
4270 auto_bitmap elim_regset
;
  /* live_subregs is a vector used to keep accurate information about
     which hardregs are live in multiword pseudos.  live_subregs and
     live_subregs_used are indexed by pseudo number.  The live_subreg
     entry for a particular pseudo is only used if the corresponding
     element is non zero in live_subregs_used.  The sbitmap size of
     live_subreg[allocno] is number of bytes that the pseudo can
     occupy.  */
  sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
4279 auto_bitmap live_subregs_used
;
4281 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
4282 if (TEST_HARD_REG_BIT (eliminable_regset
, i
))
4283 bitmap_set_bit (elim_regset
, i
);
4284 FOR_EACH_BB_REVERSE_FN (bb
, cfun
)
4289 CLEAR_REG_SET (live_relevant_regs
);
4290 bitmap_clear (live_subregs_used
);
4292 EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb
), 0, i
, bi
)
4294 if (i
>= FIRST_PSEUDO_REGISTER
)
4296 bitmap_set_bit (live_relevant_regs
, i
);
4299 EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb
),
4300 FIRST_PSEUDO_REGISTER
, i
, bi
)
4302 if (pseudo_for_reload_consideration_p (i
))
4303 bitmap_set_bit (live_relevant_regs
, i
);
4306 FOR_BB_INSNS_REVERSE (bb
, insn
)
4308 if (!NOTE_P (insn
) && !BARRIER_P (insn
))
4310 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
4313 c
= new_insn_chain ();
4320 c
->block
= bb
->index
;
4322 if (NONDEBUG_INSN_P (insn
))
4323 FOR_EACH_INSN_INFO_DEF (def
, insn_info
)
4325 unsigned int regno
= DF_REF_REGNO (def
);
4327 /* Ignore may clobbers because these are generated
4328 from calls. However, every other kind of def is
4329 added to dead_or_set. */
4330 if (!DF_REF_FLAGS_IS_SET (def
, DF_REF_MAY_CLOBBER
))
4332 if (regno
< FIRST_PSEUDO_REGISTER
)
4334 if (!fixed_regs
[regno
])
4335 bitmap_set_bit (&c
->dead_or_set
, regno
);
4337 else if (pseudo_for_reload_consideration_p (regno
))
4338 bitmap_set_bit (&c
->dead_or_set
, regno
);
4341 if ((regno
< FIRST_PSEUDO_REGISTER
4342 || reg_renumber
[regno
] >= 0
4344 && (!DF_REF_FLAGS_IS_SET (def
, DF_REF_CONDITIONAL
)))
4346 rtx reg
= DF_REF_REG (def
);
4347 HOST_WIDE_INT outer_size
, inner_size
, start
;
4349 /* We can usually track the liveness of individual
4350 bytes within a subreg. The only exceptions are
4351 subregs wrapped in ZERO_EXTRACTs and subregs whose
4352 size is not known; in those cases we need to be
4353 conservative and treat the definition as a partial
4354 definition of the full register rather than a full
4355 definition of a specific part of the register. */
4356 if (GET_CODE (reg
) == SUBREG
4357 && !DF_REF_FLAGS_IS_SET (def
, DF_REF_ZERO_EXTRACT
)
4358 && get_subreg_tracking_sizes (reg
, &outer_size
,
4359 &inner_size
, &start
))
4361 HOST_WIDE_INT last
= start
+ outer_size
;
4364 (bitmap_bit_p (live_relevant_regs
, regno
),
4365 live_subregs
, live_subregs_used
, regno
,
4368 if (!DF_REF_FLAGS_IS_SET
4369 (def
, DF_REF_STRICT_LOW_PART
))
4371 /* Expand the range to cover entire words.
4372 Bytes added here are "don't care". */
4374 = start
/ UNITS_PER_WORD
* UNITS_PER_WORD
;
4375 last
= ((last
+ UNITS_PER_WORD
- 1)
4376 / UNITS_PER_WORD
* UNITS_PER_WORD
);
4379 /* Ignore the paradoxical bits. */
4380 if (last
> SBITMAP_SIZE (live_subregs
[regno
]))
4381 last
= SBITMAP_SIZE (live_subregs
[regno
]);
4383 while (start
< last
)
4385 bitmap_clear_bit (live_subregs
[regno
], start
);
4389 if (bitmap_empty_p (live_subregs
[regno
]))
4391 bitmap_clear_bit (live_subregs_used
, regno
);
4392 bitmap_clear_bit (live_relevant_regs
, regno
);
4395 /* Set live_relevant_regs here because
4396 that bit has to be true to get us to
4397 look at the live_subregs fields. */
4398 bitmap_set_bit (live_relevant_regs
, regno
);
4402 /* DF_REF_PARTIAL is generated for
4403 subregs, STRICT_LOW_PART, and
4404 ZERO_EXTRACT. We handle the subreg
4405 case above so here we have to keep from
4406 modeling the def as a killing def. */
4407 if (!DF_REF_FLAGS_IS_SET (def
, DF_REF_PARTIAL
))
4409 bitmap_clear_bit (live_subregs_used
, regno
);
4410 bitmap_clear_bit (live_relevant_regs
, regno
);
4416 bitmap_and_compl_into (live_relevant_regs
, elim_regset
);
4417 bitmap_copy (&c
->live_throughout
, live_relevant_regs
);
4419 if (NONDEBUG_INSN_P (insn
))
4420 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
4422 unsigned int regno
= DF_REF_REGNO (use
);
4423 rtx reg
= DF_REF_REG (use
);
4425 /* DF_REF_READ_WRITE on a use means that this use
4426 is fabricated from a def that is a partial set
4427 to a multiword reg. Here, we only model the
4428 subreg case that is not wrapped in ZERO_EXTRACT
4429 precisely so we do not need to look at the
4431 if (DF_REF_FLAGS_IS_SET (use
, DF_REF_READ_WRITE
)
4432 && !DF_REF_FLAGS_IS_SET (use
, DF_REF_ZERO_EXTRACT
)
4433 && DF_REF_FLAGS_IS_SET (use
, DF_REF_SUBREG
))
4436 /* Add the last use of each var to dead_or_set. */
4437 if (!bitmap_bit_p (live_relevant_regs
, regno
))
4439 if (regno
< FIRST_PSEUDO_REGISTER
)
4441 if (!fixed_regs
[regno
])
4442 bitmap_set_bit (&c
->dead_or_set
, regno
);
4444 else if (pseudo_for_reload_consideration_p (regno
))
4445 bitmap_set_bit (&c
->dead_or_set
, regno
);
4448 if (regno
< FIRST_PSEUDO_REGISTER
4449 || pseudo_for_reload_consideration_p (regno
))
4451 HOST_WIDE_INT outer_size
, inner_size
, start
;
4452 if (GET_CODE (reg
) == SUBREG
4453 && !DF_REF_FLAGS_IS_SET (use
,
4455 | DF_REF_ZERO_EXTRACT
)
4456 && get_subreg_tracking_sizes (reg
, &outer_size
,
4457 &inner_size
, &start
))
4459 HOST_WIDE_INT last
= start
+ outer_size
;
4462 (bitmap_bit_p (live_relevant_regs
, regno
),
4463 live_subregs
, live_subregs_used
, regno
,
4466 /* Ignore the paradoxical bits. */
4467 if (last
> SBITMAP_SIZE (live_subregs
[regno
]))
4468 last
= SBITMAP_SIZE (live_subregs
[regno
]);
4470 while (start
< last
)
4472 bitmap_set_bit (live_subregs
[regno
], start
);
4477 /* Resetting the live_subregs_used is
4478 effectively saying do not use the subregs
4479 because we are reading the whole
4481 bitmap_clear_bit (live_subregs_used
, regno
);
4482 bitmap_set_bit (live_relevant_regs
, regno
);
4488 /* FIXME!! The following code is a disaster. Reload needs to see the
4489 labels and jump tables that are just hanging out in between
4490 the basic blocks. See pr33676. */
4491 insn
= BB_HEAD (bb
);
4493 /* Skip over the barriers and cruft. */
4494 while (insn
&& (BARRIER_P (insn
) || NOTE_P (insn
)
4495 || BLOCK_FOR_INSN (insn
) == bb
))
4496 insn
= PREV_INSN (insn
);
4498 /* While we add anything except barriers and notes, the focus is
4499 to get the labels and jump tables into the
4500 reload_insn_chain. */
4503 if (!NOTE_P (insn
) && !BARRIER_P (insn
))
4505 if (BLOCK_FOR_INSN (insn
))
4508 c
= new_insn_chain ();
	      /* The block makes no sense here, but it is what the old
		 code did.  */
	      c->block = bb->index;
4518 bitmap_copy (&c
->live_throughout
, live_relevant_regs
);
4520 insn
= PREV_INSN (insn
);
4524 reload_insn_chain
= c
;
4527 for (i
= 0; i
< (unsigned int) max_regno
; i
++)
4528 if (live_subregs
[i
] != NULL
)
4529 sbitmap_free (live_subregs
[i
]);
4530 free (live_subregs
);
4533 print_insn_chains (dump_file
);
/* Examine the rtx found in *LOC, which is read or written to as determined
   by TYPE.  Return false if we find a reason why an insn containing this
   rtx should not be moved (such as accesses to non-constant memory), true
   otherwise.  */

static bool
rtx_moveable_p (rtx *loc, enum op_type type)
4547 enum rtx_code code
= GET_CODE (x
);
4557 return type
== OP_IN
;
4560 if (x
== frame_pointer_rtx
)
4562 if (HARD_REGISTER_P (x
))
4568 if (type
== OP_IN
&& MEM_READONLY_P (x
))
4569 return rtx_moveable_p (&XEXP (x
, 0), OP_IN
);
4573 return (rtx_moveable_p (&SET_SRC (x
), OP_IN
)
4574 && rtx_moveable_p (&SET_DEST (x
), OP_OUT
));
4576 case STRICT_LOW_PART
:
4577 return rtx_moveable_p (&XEXP (x
, 0), OP_OUT
);
4581 return (rtx_moveable_p (&XEXP (x
, 0), type
)
4582 && rtx_moveable_p (&XEXP (x
, 1), OP_IN
)
4583 && rtx_moveable_p (&XEXP (x
, 2), OP_IN
));
4586 return rtx_moveable_p (&SET_DEST (x
), OP_OUT
);
4588 case UNSPEC_VOLATILE
:
4589 /* It is a bad idea to consider insns with such rtl
4590 as moveable ones. The insn scheduler also considers them as barrier
4595 /* The same is true for volatile asm: it has unknown side effects, it
4596 cannot be moved at will. */
4597 if (MEM_VOLATILE_P (x
))
4604 fmt
= GET_RTX_FORMAT (code
);
4605 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
4609 if (!rtx_moveable_p (&XEXP (x
, i
), type
))
4612 else if (fmt
[i
] == 'E')
4613 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
4615 if (!rtx_moveable_p (&XVECEXP (x
, i
, j
), type
))
/* A wrapper around dominated_by_p, which uses the information in UID_LUID
   to give dominance relationships between two insns I1 and I2.  */
static bool
insn_dominated_by_p (rtx i1, rtx i2, int *uid_luid)
{
  basic_block bb1 = BLOCK_FOR_INSN (i1);
  basic_block bb2 = BLOCK_FOR_INSN (i2);

  if (bb1 == bb2)
    return uid_luid[INSN_UID (i2)] < uid_luid[INSN_UID (i1)];
  return dominated_by_p (CDI_DOMINATORS, bb1, bb2);
}

/* Record the range of register numbers added by find_moveable_pseudos.  */
int first_moveable_pseudo, last_moveable_pseudo;
/* These two vectors hold data for every register added by
   find_moveable_pseudos, with index 0 holding data for the
   first_moveable_pseudo.  */
/* The original home register.  */
static vec<rtx> pseudo_replaced_reg;
/* Look for instances where we have an instruction that is known to increase
   register pressure, and whose result is not used immediately.  If it is
   possible to move the instruction downwards to just before its first use,
   split its lifetime into two ranges.  We create a new pseudo to compute the
   value, and emit a move instruction just before the first use.  If, after
   register allocation, the new pseudo remains unallocated, the function
   move_unallocated_pseudos then deletes the move instruction and places
   the computation just before the first use.

   Such a move is safe and profitable if all the input registers remain live
   and unchanged between the original computation and its first use.  In such
   a situation, the computation is known to increase register pressure, and
   moving it is known to at least not worsen it.

   We restrict moves to only those cases where a register remains unallocated,
   in order to avoid interfering too much with the instruction schedule.  As
   an exception, we may move insns which only modify their input register
   (typically induction variables), as this increases the freedom for our
   intended transformation, and does not limit the second instruction
   scheduler pass.  */

static void
find_moveable_pseudos (void)
{
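  /* Illustrative sketch of the transformation performed here: for a def
     (set (reg 100) (expr)) whose first use is far below, we rename the def
     to a fresh pseudo, (set (reg 300) (expr)), and emit
     (set (reg 100) (reg 300)) just before the first use.  If reg 300 later
     fails to get a hard register, move_unallocated_pseudos undoes the split
     by deleting the move and recomputing (expr) directly at the use.  */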
4669 int max_regs
= max_reg_num ();
4670 int max_uid
= get_max_uid ();
4672 int *uid_luid
= XNEWVEC (int, max_uid
);
4673 rtx_insn
**closest_uses
= XNEWVEC (rtx_insn
*, max_regs
);
4674 /* A set of registers which are live but not modified throughout a block. */
4675 bitmap_head
*bb_transp_live
= XNEWVEC (bitmap_head
,
4676 last_basic_block_for_fn (cfun
));
4677 /* A set of registers which only exist in a given basic block. */
4678 bitmap_head
*bb_local
= XNEWVEC (bitmap_head
,
4679 last_basic_block_for_fn (cfun
));
4680 /* A set of registers which are set once, in an instruction that can be
4681 moved freely downwards, but are otherwise transparent to a block. */
4682 bitmap_head
*bb_moveable_reg_sets
= XNEWVEC (bitmap_head
,
4683 last_basic_block_for_fn (cfun
));
4684 auto_bitmap live
, used
, set
, interesting
, unusable_as_input
;
4687 first_moveable_pseudo
= max_regs
;
4688 pseudo_replaced_reg
.release ();
4689 pseudo_replaced_reg
.safe_grow_cleared (max_regs
, true);
4692 calculate_dominance_info (CDI_DOMINATORS
);
4695 FOR_EACH_BB_FN (bb
, cfun
)
4698 bitmap transp
= bb_transp_live
+ bb
->index
;
4699 bitmap moveable
= bb_moveable_reg_sets
+ bb
->index
;
4700 bitmap local
= bb_local
+ bb
->index
;
4702 bitmap_initialize (local
, 0);
4703 bitmap_initialize (transp
, 0);
4704 bitmap_initialize (moveable
, 0);
4705 bitmap_copy (live
, df_get_live_out (bb
));
4706 bitmap_and_into (live
, df_get_live_in (bb
));
4707 bitmap_copy (transp
, live
);
4708 bitmap_clear (moveable
);
4709 bitmap_clear (live
);
4710 bitmap_clear (used
);
4712 FOR_BB_INSNS (bb
, insn
)
4713 if (NONDEBUG_INSN_P (insn
))
4715 df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
4718 uid_luid
[INSN_UID (insn
)] = i
++;
4720 def
= df_single_def (insn_info
);
4721 use
= df_single_use (insn_info
);
4724 && DF_REF_REGNO (use
) == DF_REF_REGNO (def
)
4725 && !bitmap_bit_p (set
, DF_REF_REGNO (use
))
4726 && rtx_moveable_p (&PATTERN (insn
), OP_IN
))
4728 unsigned regno
= DF_REF_REGNO (use
);
4729 bitmap_set_bit (moveable
, regno
);
4730 bitmap_set_bit (set
, regno
);
4731 bitmap_set_bit (used
, regno
);
4732 bitmap_clear_bit (transp
, regno
);
4735 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
4737 unsigned regno
= DF_REF_REGNO (use
);
4738 bitmap_set_bit (used
, regno
);
4739 if (bitmap_clear_bit (moveable
, regno
))
4740 bitmap_clear_bit (transp
, regno
);
4743 FOR_EACH_INSN_INFO_DEF (def
, insn_info
)
4745 unsigned regno
= DF_REF_REGNO (def
);
4746 bitmap_set_bit (set
, regno
);
4747 bitmap_clear_bit (transp
, regno
);
4748 bitmap_clear_bit (moveable
, regno
);
4753 FOR_EACH_BB_FN (bb
, cfun
)
4755 bitmap local
= bb_local
+ bb
->index
;
4758 FOR_BB_INSNS (bb
, insn
)
4759 if (NONDEBUG_INSN_P (insn
))
4761 df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
4763 rtx closest_use
, note
;
4766 bool all_dominated
, all_local
;
4769 def
= df_single_def (insn_info
);
4770 /* There must be exactly one def in this insn. */
4771 if (!def
|| !single_set (insn
))
4773 /* This must be the only definition of the reg. We also limit
4774 which modes we deal with so that we can assume we can generate
4775 move instructions. */
4776 regno
= DF_REF_REGNO (def
);
4777 mode
= GET_MODE (DF_REF_REG (def
));
4778 if (DF_REG_DEF_COUNT (regno
) != 1
4779 || !DF_REF_INSN_INFO (def
)
4780 || HARD_REGISTER_NUM_P (regno
)
4781 || DF_REG_EQ_USE_COUNT (regno
) > 0
4782 || (!INTEGRAL_MODE_P (mode
)
4783 && !FLOAT_MODE_P (mode
)
4784 && !OPAQUE_MODE_P (mode
)))
4786 def_insn
= DF_REF_INSN (def
);
4788 for (note
= REG_NOTES (def_insn
); note
; note
= XEXP (note
, 1))
4789 if (REG_NOTE_KIND (note
) == REG_EQUIV
&& MEM_P (XEXP (note
, 0)))
4795 fprintf (dump_file
, "Ignoring reg %d, has equiv memory\n",
4797 bitmap_set_bit (unusable_as_input
, regno
);
4801 use
= DF_REG_USE_CHAIN (regno
);
4802 all_dominated
= true;
4804 closest_use
= NULL_RTX
;
4805 for (; use
; use
= DF_REF_NEXT_REG (use
))
4808 if (!DF_REF_INSN_INFO (use
))
4810 all_dominated
= false;
4814 insn
= DF_REF_INSN (use
);
4815 if (DEBUG_INSN_P (insn
))
4817 if (BLOCK_FOR_INSN (insn
) != BLOCK_FOR_INSN (def_insn
))
4819 if (!insn_dominated_by_p (insn
, def_insn
, uid_luid
))
4820 all_dominated
= false;
4821 if (closest_use
!= insn
&& closest_use
!= const0_rtx
)
4823 if (closest_use
== NULL_RTX
)
4825 else if (insn_dominated_by_p (closest_use
, insn
, uid_luid
))
4827 else if (!insn_dominated_by_p (insn
, closest_use
, uid_luid
))
4828 closest_use
= const0_rtx
;
4834 fprintf (dump_file
, "Reg %d not all uses dominated by set\n",
4839 bitmap_set_bit (local
, regno
);
4840 if (closest_use
== const0_rtx
|| closest_use
== NULL
4841 || next_nonnote_nondebug_insn (def_insn
) == closest_use
)
4844 fprintf (dump_file
, "Reg %d uninteresting%s\n", regno
,
4845 closest_use
== const0_rtx
|| closest_use
== NULL
4846 ? " (no unique first use)" : "");
4850 bitmap_set_bit (interesting
, regno
);
4851 /* If we get here, we know closest_use is a non-NULL insn
4852 (as opposed to const_0_rtx). */
4853 closest_uses
[regno
] = as_a
<rtx_insn
*> (closest_use
);
4855 if (dump_file
&& (all_local
|| all_dominated
))
4857 fprintf (dump_file
, "Reg %u:", regno
);
4859 fprintf (dump_file
, " local to bb %d", bb
->index
);
4861 fprintf (dump_file
, " def dominates all uses");
4862 if (closest_use
!= const0_rtx
)
4863 fprintf (dump_file
, " has unique first use");
4864 fputs ("\n", dump_file
);
4869 EXECUTE_IF_SET_IN_BITMAP (interesting
, 0, i
, bi
)
4871 df_ref def
= DF_REG_DEF_CHAIN (i
);
4872 rtx_insn
*def_insn
= DF_REF_INSN (def
);
4873 basic_block def_block
= BLOCK_FOR_INSN (def_insn
);
4874 bitmap def_bb_local
= bb_local
+ def_block
->index
;
4875 bitmap def_bb_moveable
= bb_moveable_reg_sets
+ def_block
->index
;
4876 bitmap def_bb_transp
= bb_transp_live
+ def_block
->index
;
4877 bool local_to_bb_p
= bitmap_bit_p (def_bb_local
, i
);
4878 rtx_insn
*use_insn
= closest_uses
[i
];
4881 bool all_transp
= true;
4883 if (!REG_P (DF_REF_REG (def
)))
4889 fprintf (dump_file
, "Reg %u not local to one basic block\n",
4893 if (reg_equiv_init (i
) != NULL_RTX
)
4896 fprintf (dump_file
, "Ignoring reg %u with equiv init insn\n",
4900 if (!rtx_moveable_p (&PATTERN (def_insn
), OP_IN
))
4903 fprintf (dump_file
, "Found def insn %d for %d to be not moveable\n",
4904 INSN_UID (def_insn
), i
);
4908 fprintf (dump_file
, "Examining insn %d, def for %d\n",
4909 INSN_UID (def_insn
), i
);
4910 FOR_EACH_INSN_USE (use
, def_insn
)
4912 unsigned regno
= DF_REF_REGNO (use
);
4913 if (bitmap_bit_p (unusable_as_input
, regno
))
4917 fprintf (dump_file
, " found unusable input reg %u.\n", regno
);
4920 if (!bitmap_bit_p (def_bb_transp
, regno
))
4922 if (bitmap_bit_p (def_bb_moveable
, regno
)
4923 && !control_flow_insn_p (use_insn
))
4925 if (modified_between_p (DF_REF_REG (use
), def_insn
, use_insn
))
4927 rtx_insn
*x
= NEXT_INSN (def_insn
);
4928 while (!modified_in_p (DF_REF_REG (use
), x
))
4930 gcc_assert (x
!= use_insn
);
4934 fprintf (dump_file
, " input reg %u modified but insn %d moveable\n",
4935 regno
, INSN_UID (x
));
4936 emit_insn_after (PATTERN (x
), use_insn
);
4937 set_insn_deleted (x
);
4942 fprintf (dump_file
, " input reg %u modified between def and use\n",
4953 if (!dbg_cnt (ira_move
))
4956 fprintf (dump_file
, " all ok%s\n", all_transp
? " and transp" : "");
4960 rtx def_reg
= DF_REF_REG (def
);
4961 rtx newreg
= ira_create_new_reg (def_reg
);
4962 if (validate_change (def_insn
, DF_REF_REAL_LOC (def
), newreg
, 0))
4964 unsigned nregno
= REGNO (newreg
);
4965 emit_insn_before (gen_move_insn (def_reg
, newreg
), use_insn
);
4967 pseudo_replaced_reg
[nregno
] = def_reg
;
4972 FOR_EACH_BB_FN (bb
, cfun
)
4974 bitmap_clear (bb_local
+ bb
->index
);
4975 bitmap_clear (bb_transp_live
+ bb
->index
);
4976 bitmap_clear (bb_moveable_reg_sets
+ bb
->index
);
4979 free (closest_uses
);
4981 free (bb_transp_live
);
4982 free (bb_moveable_reg_sets
);
4984 last_moveable_pseudo
= max_reg_num ();
4986 fix_reg_equiv_init ();
4988 regstat_free_n_sets_and_refs ();
4990 regstat_init_n_sets_and_refs ();
4991 regstat_compute_ri ();
4992 free_dominance_info (CDI_DOMINATORS
);
/* If SET pattern SET is an assignment from a hard register to a pseudo which
   is live at CALL_DOM (if non-NULL, otherwise this check is omitted), return
   the destination.  Otherwise return NULL.  */

static rtx
interesting_dest_for_shprep_1 (rtx set, basic_block call_dom)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  if (!REG_P (src) || !HARD_REGISTER_P (src)
      || !REG_P (dest) || HARD_REGISTER_P (dest)
      || (call_dom && !bitmap_bit_p (df_get_live_in (call_dom), REGNO (dest))))
    return NULL;
  return dest;
}
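/* A typical match (illustrative): the entry-block copy of an incoming
   argument from its hard register into a pseudo; if that pseudo is still
   live at CALL_DOM, its live range is a candidate for splitting.  */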
/* If insn is interesting for parameter range-splitting shrink-wrapping
   preparation, i.e. it is a single set from a hard register to a pseudo, which
   is live at CALL_DOM (if non-NULL, otherwise this check is omitted), or a
   parallel statement with only one such statement, return the destination.
   Otherwise return NULL.  */

static rtx
interesting_dest_for_shprep (rtx_insn *insn, basic_block call_dom)
{
5022 rtx pat
= PATTERN (insn
);
5023 if (GET_CODE (pat
) == SET
)
5024 return interesting_dest_for_shprep_1 (pat
, call_dom
);
5026 if (GET_CODE (pat
) != PARALLEL
)
5029 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
5031 rtx sub
= XVECEXP (pat
, 0, i
);
5032 if (GET_CODE (sub
) == USE
|| GET_CODE (sub
) == CLOBBER
)
5034 if (GET_CODE (sub
) != SET
5035 || side_effects_p (sub
))
5037 rtx dest
= interesting_dest_for_shprep_1 (sub
, call_dom
);
/* Split live ranges of pseudos that are loaded from hard registers in the
   first BB in a BB that dominates all non-sibling call if such a BB can be
   found and is not in a loop.  Return true if the function has made any
   changes.  */

static bool
split_live_ranges_for_shrink_wrap (void)
{
5054 basic_block bb
, call_dom
= NULL
;
5055 basic_block first
= single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun
));
5056 rtx_insn
*insn
, *last_interesting_insn
= NULL
;
5057 auto_bitmap need_new
, reachable
;
5058 vec
<basic_block
> queue
;
5060 if (!SHRINK_WRAPPING_ENABLED
)
5063 queue
.create (n_basic_blocks_for_fn (cfun
));
5065 FOR_EACH_BB_FN (bb
, cfun
)
5066 FOR_BB_INSNS (bb
, insn
)
5067 if (CALL_P (insn
) && !SIBLING_CALL_P (insn
))
5075 bitmap_set_bit (need_new
, bb
->index
);
5076 bitmap_set_bit (reachable
, bb
->index
);
5077 queue
.quick_push (bb
);
5081 if (queue
.is_empty ())
5087 while (!queue
.is_empty ())
5093 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5094 if (e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
5095 && bitmap_set_bit (reachable
, e
->dest
->index
))
5096 queue
.quick_push (e
->dest
);
5100 FOR_BB_INSNS (first
, insn
)
5102 rtx dest
= interesting_dest_for_shprep (insn
, NULL
);
5106 if (DF_REG_DEF_COUNT (REGNO (dest
)) > 1)
5109 for (df_ref use
= DF_REG_USE_CHAIN (REGNO(dest
));
5111 use
= DF_REF_NEXT_REG (use
))
5113 int ubbi
= DF_REF_BB (use
)->index
;
5114 if (bitmap_bit_p (reachable
, ubbi
))
5115 bitmap_set_bit (need_new
, ubbi
);
5117 last_interesting_insn
= insn
;
5120 if (!last_interesting_insn
)
5123 call_dom
= nearest_common_dominator_for_set (CDI_DOMINATORS
, need_new
);
5124 if (call_dom
== first
)
5127 loop_optimizer_init (AVOID_CFG_MODIFICATIONS
);
5128 while (bb_loop_depth (call_dom
) > 0)
5129 call_dom
= get_immediate_dominator (CDI_DOMINATORS
, call_dom
);
5130 loop_optimizer_finalize ();
5132 if (call_dom
== first
)
5135 calculate_dominance_info (CDI_POST_DOMINATORS
);
5136 if (dominated_by_p (CDI_POST_DOMINATORS
, first
, call_dom
))
5138 free_dominance_info (CDI_POST_DOMINATORS
);
5141 free_dominance_info (CDI_POST_DOMINATORS
);
5144 fprintf (dump_file
, "Will split live ranges of parameters at BB %i\n",
5148 FOR_BB_INSNS (first
, insn
)
5150 rtx dest
= interesting_dest_for_shprep (insn
, call_dom
);
5151 if (!dest
|| dest
== pic_offset_table_rtx
)
5154 bool need_newreg
= false;
5156 for (use
= DF_REG_USE_CHAIN (REGNO (dest
)); use
; use
= next
)
5158 rtx_insn
*uin
= DF_REF_INSN (use
);
5159 next
= DF_REF_NEXT_REG (use
);
5161 if (DEBUG_INSN_P (uin
))
5164 basic_block ubb
= BLOCK_FOR_INSN (uin
);
5166 || dominated_by_p (CDI_DOMINATORS
, ubb
, call_dom
))
5175 rtx newreg
= ira_create_new_reg (dest
);
5177 for (use
= DF_REG_USE_CHAIN (REGNO (dest
)); use
; use
= next
)
5179 rtx_insn
*uin
= DF_REF_INSN (use
);
5180 next
= DF_REF_NEXT_REG (use
);
5182 basic_block ubb
= BLOCK_FOR_INSN (uin
);
5184 || dominated_by_p (CDI_DOMINATORS
, ubb
, call_dom
))
5185 validate_change (uin
, DF_REF_REAL_LOC (use
), newreg
, true);
5188 rtx_insn
*new_move
= gen_move_insn (newreg
, dest
);
5189 emit_insn_after (new_move
, bb_note (call_dom
));
5192 fprintf (dump_file
, "Split live-range of register ");
5193 print_rtl_single (dump_file
, dest
);
5198 if (insn
== last_interesting_insn
)
5201 apply_change_group ();
/* Perform the second half of the transformation started in
   find_moveable_pseudos.  We look for instances where the newly introduced
   pseudo remains unallocated, and remove it by moving the definition to
   just before its use, replacing the move instruction generated by
   find_moveable_pseudos.  */

static void
move_unallocated_pseudos (void)
{
  int i;
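  /* Continuing the sketch from find_moveable_pseudos: if the helper pseudo
     (reg 300 in that example) got no hard register, the move
     (set (reg 100) (reg 300)) is deleted and the original computation is
     re-emitted right before the use with reg 100 restored as its
     destination.  */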
5214 for (i
= first_moveable_pseudo
; i
< last_moveable_pseudo
; i
++)
5215 if (reg_renumber
[i
] < 0)
5217 int idx
= i
- first_moveable_pseudo
;
5218 rtx other_reg
= pseudo_replaced_reg
[idx
];
	/* The iterating range [first_moveable_pseudo, last_moveable_pseudo)
	   covers every new pseudo created in find_moveable_pseudos,
	   regardless of whether the validation for it succeeded or not.
	   So we need to skip the pseudos which were used in those failed
	   validations to avoid unexpected DF info and a consequent ICE.
	   Since we only set pseudo_replaced_reg[] when the validation is
	   successful in find_moveable_pseudos, it's enough to check it
	   here.  */
5228 rtx_insn
*def_insn
= DF_REF_INSN (DF_REG_DEF_CHAIN (i
));
5229 /* The use must follow all definitions of OTHER_REG, so we can
5230 insert the new definition immediately after any of them. */
5231 df_ref other_def
= DF_REG_DEF_CHAIN (REGNO (other_reg
));
5232 rtx_insn
*move_insn
= DF_REF_INSN (other_def
);
5233 rtx_insn
*newinsn
= emit_insn_after (PATTERN (def_insn
), move_insn
);
5238 fprintf (dump_file
, "moving def of %d (insn %d now) ",
5239 REGNO (other_reg
), INSN_UID (def_insn
));
5241 delete_insn (move_insn
);
5242 while ((other_def
= DF_REG_DEF_CHAIN (REGNO (other_reg
))))
5243 delete_insn (DF_REF_INSN (other_def
));
5244 delete_insn (def_insn
);
5246 set
= single_set (newinsn
);
5247 success
= validate_change (newinsn
, &SET_DEST (set
), other_reg
, 0);
5248 gcc_assert (success
);
5250 fprintf (dump_file
, " %d) rather than keep unallocated replacement %d\n",
5251 INSN_UID (newinsn
), i
);
5252 SET_REG_N_REFS (i
, 0);
5255 first_moveable_pseudo
= last_moveable_pseudo
= 0;
/* Code dealing with scratches (changing them onto
   pseudos and restoring them from the pseudos).

   We change scratches into pseudos at the beginning of IRA to
   simplify dealing with them (conflicts, hard register assignments).

   If the pseudo denoting a scratch was spilled it means that we do not
   need a hard register for it.  Such pseudos are transformed back to
   scratches at the end of LRA.  */

/* Description of location of a former scratch operand.  */
struct sloc
{
  rtx_insn *insn;  /* Insn where the scratch was.  */
  int nop;	   /* Number of the operand which was a scratch.  */
  unsigned regno;  /* regno generated instead of scratch.  */
  int icode;	   /* Original icode from which scratch was removed.  */
};

typedef struct sloc *sloc_t;
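/* Illustrative example: for an insn whose pattern contains
   (clobber (scratch:SI)), remove_scratches rewrites the SCRATCH into a fresh
   pseudo and records its location here; if that pseudo ends up without a hard
   register, LRA later turns it back into (scratch:SI) via
   ira_restore_scratches.  */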
/* Locations of the former scratches.  */
static vec<sloc_t> scratches;

/* Bitmap of scratch regnos.  */
static bitmap_head scratch_bitmap;

/* Bitmap of scratch operands.  */
static bitmap_head scratch_operand_bitmap;
/* Return true if pseudo REGNO is made of SCRATCH.  */
bool
ira_former_scratch_p (int regno)
{
  return bitmap_bit_p (&scratch_bitmap, regno);
}

/* Return true if the operand NOP of INSN is a former scratch.  */
bool
ira_former_scratch_operand_p (rtx_insn *insn, int nop)
{
  return bitmap_bit_p (&scratch_operand_bitmap,
		       INSN_UID (insn) * MAX_RECOG_OPERANDS + nop) != 0;
}
/* Register operand NOP in INSN as a former scratch.  It will be
   changed to scratch back, if it is necessary, at the LRA end.  */
void
ira_register_new_scratch_op (rtx_insn *insn, int nop, int icode)
{
  rtx op = *recog_data.operand_loc[nop];
  sloc_t loc = XNEW (struct sloc);
  ira_assert (REG_P (op));
  loc->insn = insn;
  loc->nop = nop;
  loc->regno = REGNO (op);
  loc->icode = icode;
  scratches.safe_push (loc);
  bitmap_set_bit (&scratch_bitmap, REGNO (op));
  bitmap_set_bit (&scratch_operand_bitmap,
		  INSN_UID (insn) * MAX_RECOG_OPERANDS + nop);
  add_reg_note (insn, REG_UNUSED, op);
}
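/* The REG_UNUSED note added above records that the value written to the
   former scratch is not used after the insn, which keeps the liveness of the
   replacement pseudo as short as that of the original SCRATCH.  */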
/* Return true if string STR contains constraint 'X'.  */
static bool
contains_X_constraint_p (const char *str)
{
  int c;

  while ((c = *str))
    {
      str += CONSTRAINT_LEN (c, str);
      if (c == 'X')
	return true;
    }
  return false;
}
/* Change INSN's scratches into pseudos and save their location.
   Return true if we changed any scratch.  */
bool
ira_remove_insn_scratches (rtx_insn *insn, bool all_p, FILE *dump_file,
			   rtx (*get_reg) (rtx original))
{
5345 bool insn_changed_p
;
5348 extract_insn (insn
);
5349 insn_changed_p
= false;
5350 for (i
= 0; i
< recog_data
.n_operands
; i
++)
5352 loc
= recog_data
.operand_loc
[i
];
5353 if (GET_CODE (*loc
) == SCRATCH
&& GET_MODE (*loc
) != VOIDmode
)
5355 if (! all_p
&& contains_X_constraint_p (recog_data
.constraints
[i
]))
5357 insn_changed_p
= true;
5358 *loc
= reg
= get_reg (*loc
);
5359 ira_register_new_scratch_op (insn
, i
, INSN_CODE (insn
));
5360 if (ira_dump_file
!= NULL
)
5362 "Removing SCRATCH to p%u in insn #%u (nop %d)\n",
5363 REGNO (reg
), INSN_UID (insn
), i
);
5366 return insn_changed_p
;
/* Return new register of the same mode as ORIGINAL.  Used in
   remove_scratches.  */
static rtx
get_scratch_reg (rtx original)
{
  return gen_reg_rtx (GET_MODE (original));
}

/* Change scratches into pseudos and save their location.  Return true
   if we changed any scratch.  */
static bool
remove_scratches (void)
{
5382 bool change_p
= false;
5386 scratches
.create (get_max_uid ());
  bitmap_initialize (&scratch_bitmap, &reg_obstack);
  bitmap_initialize (&scratch_operand_bitmap, &reg_obstack);
5389 FOR_EACH_BB_FN (bb
, cfun
)
5390 FOR_BB_INSNS (bb
, insn
)
5392 && ira_remove_insn_scratches (insn
, false, ira_dump_file
, get_scratch_reg
))
5394 /* Because we might use DF, we need to keep DF info up to date. */
5395 df_insn_rescan (insn
);
/* Changes pseudos created by function remove_scratches onto scratches.  */
void
ira_restore_scratches (FILE *dump_file)
{
5410 for (i
= 0; scratches
.iterate (i
, &loc
); i
++)
5412 /* Ignore already deleted insns. */
5413 if (NOTE_P (loc
->insn
)
5414 && NOTE_KIND (loc
->insn
) == NOTE_INSN_DELETED
)
5416 extract_insn (loc
->insn
);
5417 if (loc
->icode
!= INSN_CODE (loc
->insn
))
5419 /* The icode doesn't match, which means the insn has been modified
5420 (e.g. register elimination). The scratch cannot be restored. */
5423 op_loc
= recog_data
.operand_loc
[loc
->nop
];
5425 && ((regno
= REGNO (*op_loc
)) >= FIRST_PSEUDO_REGISTER
)
5426 && reg_renumber
[regno
] < 0)
5428 /* It should be only case when scratch register with chosen
5429 constraint 'X' did not get memory or hard register. */
5430 ira_assert (ira_former_scratch_p (regno
));
5431 *op_loc
= gen_rtx_SCRATCH (GET_MODE (*op_loc
));
5432 for (n
= 0; n
< recog_data
.n_dups
; n
++)
5433 *recog_data
.dup_loc
[n
]
5434 = *recog_data
.operand_loc
[(int) recog_data
.dup_num
[n
]];
5435 if (dump_file
!= NULL
)
5436 fprintf (dump_file
, "Restoring SCRATCH in insn #%u(nop %d)\n",
5437 INSN_UID (loc
->insn
), loc
->nop
);
5440 for (i
= 0; scratches
.iterate (i
, &loc
); i
++)
5442 scratches
.release ();
5443 bitmap_clear (&scratch_bitmap
);
5444 bitmap_clear (&scratch_operand_bitmap
);
/* If the backend knows where to allocate pseudos for hard
   register initial values, register these allocations now.  */
static void
allocate_initial_values (void)
{
5454 if (targetm
.allocate_initial_value
)
5459 for (i
= 0; HARD_REGISTER_NUM_P (i
); i
++)
5461 if (! initial_value_entry (i
, &hreg
, &preg
))
5464 x
= targetm
.allocate_initial_value (hreg
);
5465 regno
= REGNO (preg
);
5466 if (x
&& REG_N_SETS (regno
) <= 1)
5469 reg_equiv_memory_loc (regno
) = x
;
5475 gcc_assert (REG_P (x
));
5476 new_regno
= REGNO (x
);
5477 reg_renumber
[regno
] = new_regno
;
5478 /* Poke the regno right into regno_reg_rtx so that even
5479 fixed regs are accepted. */
5480 SET_REGNO (preg
, new_regno
);
5481 /* Update global register liveness information. */
5482 FOR_EACH_BB_FN (bb
, cfun
)
5484 if (REGNO_REG_SET_P (df_get_live_in (bb
), regno
))
5485 SET_REGNO_REG_SET (df_get_live_in (bb
), new_regno
);
5486 if (REGNO_REG_SET_P (df_get_live_out (bb
), regno
))
5487 SET_REGNO_REG_SET (df_get_live_out (bb
), new_regno
);
5493 gcc_checking_assert (! initial_value_entry (FIRST_PSEUDO_REGISTER
,
/* True when we use LRA instead of reload pass for the current
   function.  */
bool ira_use_lra_p;

/* True if we have allocno conflicts.  It is false for non-optimized
   mode or when the conflict table is too big.  */
bool ira_conflicts_p;

/* Saved between IRA and reload.  */
static int saved_flag_ira_share_spill_slots;

/* This is the main entry of IRA.  */
static void
ira (FILE *f)
{
5517 int ira_max_point_before_emit
;
5518 bool saved_flag_caller_saves
= flag_caller_saves
;
5519 enum ira_region saved_flag_ira_region
= flag_ira_region
;
5523 bool output_jump_reload_p
= false;
5527 /* First put potential jump output reloads on the output edges
5528 as USE which will be removed at the end of LRA. The major
5529 goal is actually to create BBs for critical edges for LRA and
5530 populate them later by live info. In LRA it will be
5531 difficult to do this. */
5532 FOR_EACH_BB_FN (bb
, cfun
)
5534 rtx_insn
*end
= BB_END (bb
);
5538 for (int i
= 0; i
< recog_data
.n_operands
; i
++)
5539 if (recog_data
.operand_type
[i
] != OP_IN
)
5541 bool skip_p
= false;
5542 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5543 if (EDGE_CRITICAL_P (e
)
5544 && e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
5545 && (e
->flags
& EDGE_ABNORMAL
))
5552 output_jump_reload_p
= true;
5553 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
5554 if (EDGE_CRITICAL_P (e
)
5555 && e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
))
		  /* We need to put some no-op insn here.  We cannot
		     put a note as commit_edges insertion will fail.  */
		  emit_insn (gen_rtx_USE (VOIDmode, const1_rtx));
5562 rtx_insn
*insns
= get_insns ();
5564 insert_insn_on_edge (insns
, e
);
5569 if (output_jump_reload_p
)
5570 commit_edge_insertions ();
5573 if (flag_ira_verbose
< 10)
5575 internal_flag_ira_verbose
= flag_ira_verbose
;
5580 internal_flag_ira_verbose
= flag_ira_verbose
- 10;
5581 ira_dump_file
= stderr
;
5586 /* Determine if the current function is a leaf before running IRA
5587 since this can impact optimizations done by the prologue and
5588 epilogue thus changing register elimination offsets.
5589 Other target callbacks may use crtl->is_leaf too, including
5590 SHRINK_WRAPPING_ENABLED, so initialize as early as possible. */
5591 crtl
->is_leaf
= leaf_function_p ();
5593 /* Perform target specific PIC register initialization. */
5594 targetm
.init_pic_reg ();
5596 ira_conflicts_p
= optimize
> 0;
5598 /* Determine the number of pseudos actually requiring coloring. */
5599 unsigned int num_used_regs
= 0;
5600 for (unsigned int i
= FIRST_PSEUDO_REGISTER
; i
< DF_REG_SIZE (df
); i
++)
5601 if (DF_REG_DEF_COUNT (i
) || DF_REG_USE_COUNT (i
))
5604 /* If there are too many pseudos and/or basic blocks (e.g. 10K
5605 pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
5606 use simplified and faster algorithms in LRA. */
5609 && num_used_regs
>= (1U << 26) / last_basic_block_for_fn (cfun
);
5613 /* It permits to skip live range splitting in LRA. */
5614 flag_caller_saves
= false;
5615 /* There is no sense to do regional allocation when we use
5617 flag_ira_region
= IRA_REGION_ONE
;
5618 ira_conflicts_p
= false;
5621 #ifndef IRA_NO_OBSTACK
5622 gcc_obstack_init (&ira_obstack
);
5624 bitmap_obstack_initialize (&ira_bitmap_obstack
);
5626 /* LRA uses its own infrastructure to handle caller save registers. */
5627 if (flag_caller_saves
&& !ira_use_lra_p
)
5628 init_caller_save ();
5630 setup_prohibited_mode_move_regs ();
5631 decrease_live_ranges_number ();
5632 df_note_add_problem ();
5634 /* DF_LIVE can't be used in the register allocator, too many other
5635 parts of the compiler depend on using the "classic" liveness
5636 interpretation of the DF_LR problem. See PR38711.
5637 Remove the problem, so that we don't spend time updating it in
5638 any of the df_analyze() calls during IRA/LRA. */
5640 df_remove_problem (df_live
);
5641 gcc_checking_assert (df_live
== NULL
);
5644 df
->changeable_flags
|= DF_VERIFY_SCHEDULED
;
5649 if (ira_conflicts_p
)
5651 calculate_dominance_info (CDI_DOMINATORS
);
5653 if (split_live_ranges_for_shrink_wrap ())
5656 free_dominance_info (CDI_DOMINATORS
);
5659 df_clear_flags (DF_NO_INSN_RESCAN
);
5661 indirect_jump_optimize ();
5662 if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
5665 regstat_init_n_sets_and_refs ();
5666 regstat_compute_ri ();
5668 /* If we are not optimizing, then this is the only place before
5669 register allocation where dataflow is done. And that is needed
5670 to generate these warnings. */
5672 generate_setjmp_warnings ();
5674 /* update_equiv_regs can use reg classes of pseudos and they are set up in
5675 register pressure sensitive scheduling and loop invariant motion and in
5676 live range shrinking. This info can become obsolete if we add new pseudos
5677 since the last set up. Recalculate it again if the new pseudos were
5679 if (resize_reg_info () && (flag_sched_pressure
|| flag_live_range_shrinkage
5680 || flag_ira_loop_pressure
))
5681 ira_set_pseudo_classes (true, ira_dump_file
);
5683 init_alias_analysis ();
5684 loop_optimizer_init (AVOID_CFG_MODIFICATIONS
);
5685 reg_equiv
= XCNEWVEC (struct equivalence
, max_reg_num ());
5686 update_equiv_regs_prescan ();
5687 update_equiv_regs ();
5689 /* Don't move insns if live range shrinkage or register
5690 pressure-sensitive scheduling were done because it will not
5691 improve allocation but likely worsen insn scheduling. */
5693 && !flag_live_range_shrinkage
5694 && !(flag_sched_pressure
&& flag_schedule_insns
))
5695 combine_and_move_insns ();
5697 /* Gather additional equivalences with memory. */
5699 add_store_equivs ();
5701 loop_optimizer_finalize ();
5702 free_dominance_info (CDI_DOMINATORS
);
5703 end_alias_analysis ();
5706 /* Once max_regno changes, we need to free and re-init/re-compute
5707 some data structures like regstat_n_sets_and_refs and reg_info_p. */
5708 auto regstat_recompute_for_max_regno
= []() {
5709 regstat_free_n_sets_and_refs ();
5711 regstat_init_n_sets_and_refs ();
5712 regstat_compute_ri ();
5715 int max_regno_before_rm
= max_reg_num ();
5716 if (ira_use_lra_p
&& remove_scratches ())
5718 ira_expand_reg_equiv ();
      /* For now remove_scratches is supposed to create pseudos when it
	 succeeds; assert this happens all the time.  Once it doesn't
	 hold, we should guard the regstat recompute for the case where
	 max_regno changes.  */
5723 gcc_assert (max_regno_before_rm
!= max_reg_num ());
5724 regstat_recompute_for_max_regno ();
5729 setup_reg_equiv_init ();
5731 allocated_reg_info_size
= max_reg_num ();
5733 /* It is not worth to do such improvement when we use a simple
5734 allocation because of -O0 usage or because the function is too
5736 if (ira_conflicts_p
)
5737 find_moveable_pseudos ();
5739 max_regno_before_ira
= max_reg_num ();
5740 ira_setup_eliminable_regset ();
5742 ira_overall_cost
= ira_reg_cost
= ira_mem_cost
= 0;
5743 ira_load_cost
= ira_store_cost
= ira_shuffle_cost
= 0;
5744 ira_move_loops_num
= ira_additional_jumps_num
= 0;
5746 ira_assert (current_loops
== NULL
);
5747 if (flag_ira_region
== IRA_REGION_ALL
|| flag_ira_region
== IRA_REGION_MIXED
)
5748 loop_optimizer_init (AVOID_CFG_MODIFICATIONS
| LOOPS_HAVE_RECORDED_EXITS
);
5750 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
5751 fprintf (ira_dump_file
, "Building IRA IR\n");
5752 loops_p
= ira_build ();
5754 ira_assert (ira_conflicts_p
|| !loops_p
);
5756 saved_flag_ira_share_spill_slots
= flag_ira_share_spill_slots
;
5757 if (too_high_register_pressure_p () || cfun
->calls_setjmp
)
5758 /* It is just wasting compiler's time to pack spilled pseudos into
5759 stack slots in this case -- prohibit it. We also do this if
5760 there is setjmp call because a variable not modified between
5761 setjmp and longjmp the compiler is required to preserve its
5762 value and sharing slots does not guarantee it. */
5763 flag_ira_share_spill_slots
= FALSE
;
5767 ira_max_point_before_emit
= ira_max_point
;
5769 ira_initiate_emit_data ();
5773 max_regno
= max_reg_num ();
5774 if (ira_conflicts_p
)
5778 if (! ira_use_lra_p
)
5779 ira_initiate_assign ();
5788 ira_allocno_iterator ai
;
5790 FOR_EACH_ALLOCNO (a
, ai
)
5792 int old_regno
= ALLOCNO_REGNO (a
);
5793 int new_regno
= REGNO (ALLOCNO_EMIT_DATA (a
)->reg
);
5795 ALLOCNO_REGNO (a
) = new_regno
;
5797 if (old_regno
!= new_regno
)
5798 setup_reg_classes (new_regno
, reg_preferred_class (old_regno
),
5799 reg_alternate_class (old_regno
),
5800 reg_allocno_class (old_regno
));
5805 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
5806 fprintf (ira_dump_file
, "Flattening IR\n");
5807 ira_flattening (max_regno_before_ira
, ira_max_point_before_emit
);
5809 /* New insns were generated: add notes and recalculate live
5813 /* ??? Rebuild the loop tree, but why? Does the loop tree
5814 change if new insns were generated? Can that be handled
5815 by updating the loop tree incrementally? */
5816 loop_optimizer_finalize ();
5817 free_dominance_info (CDI_DOMINATORS
);
5818 loop_optimizer_init (AVOID_CFG_MODIFICATIONS
5819 | LOOPS_HAVE_RECORDED_EXITS
);
5821 if (! ira_use_lra_p
)
5823 setup_allocno_assignment_flags ();
5824 ira_initiate_assign ();
5825 ira_reassign_conflict_allocnos (max_regno
);
5830 ira_finish_emit_data ();
5832 setup_reg_renumber ();
5834 calculate_allocation_cost ();
5836 #ifdef ENABLE_IRA_CHECKING
5837 if (ira_conflicts_p
&& ! ira_use_lra_p
)
5838 /* Opposite to reload pass, LRA does not use any conflict info
5839 from IRA. We don't rebuild conflict info for LRA (through
5840 ira_flattening call) and cannot use the check here. We could
5841 rebuild this info for LRA in the check mode but there is a risk
5842 that code generated with the check and without it will be a bit
5843 different. Calling ira_flattening in any mode would be a
5844 wasting CPU time. So do not check the allocation for LRA. */
5845 check_allocation ();
5848 if (max_regno
!= max_regno_before_ira
)
5849 regstat_recompute_for_max_regno ();
5851 overall_cost_before
= ira_overall_cost
;
5852 if (! ira_conflicts_p
)
5856 fix_reg_equiv_init ();
5858 #ifdef ENABLE_IRA_CHECKING
5859 print_redundant_copies ();
5861 if (! ira_use_lra_p
)
5863 ira_spilled_reg_stack_slots_num
= 0;
5864 ira_spilled_reg_stack_slots
5865 = ((class ira_spilled_reg_stack_slot
*)
5866 ira_allocate (max_regno
5867 * sizeof (class ira_spilled_reg_stack_slot
)));
5868 memset ((void *)ira_spilled_reg_stack_slots
, 0,
5869 max_regno
* sizeof (class ira_spilled_reg_stack_slot
));
5872 allocate_initial_values ();
5874 /* See comment for find_moveable_pseudos call. */
5875 if (ira_conflicts_p
)
5876 move_unallocated_pseudos ();
5878 /* Restore original values. */
5881 flag_caller_saves
= saved_flag_caller_saves
;
5882 flag_ira_region
= saved_flag_ira_region
;
/* Modify asm goto to avoid further trouble with this insn.  We cannot
   replace the insn by USE as in other asm insns as we still
   need to keep CFG consistency.  */
void
ira_nullify_asm_goto (rtx_insn *insn)
{
5892 ira_assert (JUMP_P (insn
) && INSN_CODE (insn
) < 0);
5893 rtx tmp
= extract_asm_operands (PATTERN (insn
));
5894 PATTERN (insn
) = gen_rtx_ASM_OPERANDS (VOIDmode
, ggc_strdup (""), "", 0,
5897 ASM_OPERANDS_LABEL_VEC (tmp
),
5898 ASM_OPERANDS_SOURCE_LOCATION(tmp
));
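
/* Illustrative (hypothetical) example of what the function above does.
   Given an asm goto that has to be discarded, for example because its
   operands turned out to be impossible to reload:

	asm goto ("" : : "r" (x) : : err);

   its pattern is replaced by an empty ASM_OPERANDS -- no template, no
   inputs, no constraints -- but the original label vector (here ERR)
   and source location are retained.  That keeps the jump edges to the
   asm's labels in the CFG valid, which is why the insn cannot simply
   be turned into a USE as is done for ordinary asm insns.  */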
/* Entry point for the reload/LRA part of register allocation.  */
static void
do_reload (void)
{
  basic_block bb;
  bool need_dce;
  unsigned pic_offset_table_regno = INVALID_REGNUM;

  if (flag_ira_verbose < 10)
    ira_dump_file = dump_file;

  /* If pic_offset_table_rtx is a pseudo register, then keep it so
     after reload to avoid possible wrong usages of the hard reg
     assigned to it.  */
  if (pic_offset_table_rtx
      && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
    pic_offset_table_regno = REGNO (pic_offset_table_rtx);
  timevar_push (TV_RELOAD);

  if (ira_use_lra_p)
    {
      if (current_loops != NULL)
	{
	  loop_optimizer_finalize ();
	  free_dominance_info (CDI_DOMINATORS);
	}
      FOR_ALL_BB_FN (bb, cfun)
	bb->loop_father = NULL;
      current_loops = NULL;

      ira_destroy ();

      lra (ira_dump_file);
      /* ???!!! Move it before lra () when we use ira_reg_equiv in
	 LRA.  */
      vec_free (reg_equivs);
      reg_equivs = NULL;
      need_dce = false;
    }
  else
    {
      df_set_flags (DF_NO_INSN_RESCAN);
      build_insn_chain ();

      need_dce = reload (get_insns (), ira_conflicts_p);
    }

  timevar_pop (TV_RELOAD);
  timevar_push (TV_IRA);

  if (ira_conflicts_p && ! ira_use_lra_p)
    {
      ira_free (ira_spilled_reg_stack_slots);
      ira_finish_assign ();
    }

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
      && overall_cost_before != ira_overall_cost)
    fprintf (ira_dump_file, "+++Overall after reload %" PRId64 "\n",
	     ira_overall_cost);

  flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
  if (! ira_use_lra_p)
    {
      ira_destroy ();
      if (current_loops != NULL)
	{
	  loop_optimizer_finalize ();
	  free_dominance_info (CDI_DOMINATORS);
	}
      FOR_ALL_BB_FN (bb, cfun)
	bb->loop_father = NULL;
      current_loops = NULL;

      regstat_free_ri ();
      regstat_free_n_sets_and_refs ();
    }

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE);
  finish_reg_equiv ();

  bitmap_obstack_release (&ira_bitmap_obstack);
#ifndef IRA_NO_OBSTACK
  obstack_free (&ira_obstack, NULL);
#endif

  /* The code after reload has changed so much that at this point we
     might as well just rescan everything.  Note that
     df_rescan_all_insns is not going to help here because it does not
     touch the artificial uses and defs.  */
  df_finish_pass (true);
  df_scan_alloc (NULL);
  df_scan_blocks ();

  if (optimize > 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  df_analyze ();
  if (need_dce && optimize)
    run_fast_dce ();
  /* Diagnose uses of the hard frame pointer when it is used as a global
     register.  Often we can get away with letting the user appropriate
     the frame pointer, but we should let them know when code generation
     makes that impossible.  */
  if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
    {
      tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
      error_at (DECL_SOURCE_LOCATION (current_function_decl),
		"frame pointer required, but reserved");
      inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
    }
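
  /* Illustrative (hypothetical) example of source that triggers the
     diagnostic above: declaring a global register variable in the hard
     frame pointer register, e.g.

	register void *fp asm ("FRAME_POINTER_REG_NAME");

     (the register name is target specific), and then writing a function
     that nevertheless needs a frame pointer, for instance because it
     calls alloca or has a variable-size stack frame.  The reservation
     cannot be honored there, so the error points at the function and
     the note at the global register declaration.  */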
  /* If we are doing generic stack checking, give a warning if this
     function's frame size is larger than we expect.  */
  if (flag_stack_check == GENERIC_STACK_CHECK)
    {
      poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;

      for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (df_regs_ever_live_p (i)
	    && !crtl->abi->clobbers_full_reg_p (i))
	  size += UNITS_PER_WORD;

      if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
	warning (0, "frame size too large for reliable stack checking");
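
      /* Worked example with purely illustrative numbers: on a 64-bit
	 target with STACK_CHECK_FIXED_FRAME_SIZE == 32, a frame of 200
	 bytes and three live registers that calls do not clobber, the
	 estimate is 200 + 32 + 3 * 8 = 256 bytes; the warning fires only
	 if the constant lower bound of that estimate exceeds
	 STACK_CHECK_MAX_FRAME_SIZE.  */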
    }

  if (pic_offset_table_regno != INVALID_REGNUM)
    pic_offset_table_rtx = gen_rtx_REG (Pmode, pic_offset_table_regno);

  timevar_pop (TV_IRA);
}
/* Run the integrated register allocator.  */

namespace {

const pass_data pass_data_ira =
{
  RTL_PASS, /* type */
  "ira", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IRA, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_do_not_ggc_collect, /* todo_flags_finish */
};

class pass_ira : public rtl_opt_pass
{
public:
  pass_ira (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_ira, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return !targetm.no_register_allocation;
    }
  virtual unsigned int execute (function *)
    {
      ira (dump_file);
      return 0;
    }

}; // class pass_ira
} // anon namespace

rtl_opt_pass *
make_pass_ira (gcc::context *ctxt)
{
  return new pass_ira (ctxt);
}
namespace {

const pass_data pass_data_reload =
{
  RTL_PASS, /* type */
  "reload", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_RELOAD, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_reload : public rtl_opt_pass
{
public:
  pass_reload (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_reload, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return !targetm.no_register_allocation;
    }
  virtual unsigned int execute (function *)
    {
      do_reload ();
      return 0;
    }

}; // class pass_reload
} // anon namespace

rtl_opt_pass *
make_pass_reload (gcc::context *ctxt)
{
  return new pass_reload (ctxt);
}
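
/* Note on pass ordering (as found in the pass pipeline, see passes.def):
   the two factory functions above are referenced there with pass_ira
   immediately followed by pass_reload, e.g.

	NEXT_PASS (pass_ira);
	NEXT_PASS (pass_reload);

   so the allocator always runs directly before reload/LRA, and a target
   that sets targetm.no_register_allocation skips both passes through the
   gates above.  */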