/* Integrated Register Allocator (IRA) entry point.
   Copyright (C) 2006-2016 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* The integrated register allocator (IRA) is a regional register
   allocator performing graph coloring on a top-down traversal of
   nested regions.  Graph coloring in a region is based on the
   Chaitin-Briggs algorithm.  It is called integrated because register
   coalescing, register live range splitting, and choosing a better
   hard register are done on-the-fly during coloring.  Register
   coalescing and choosing a cheaper hard register are done by hard
   register preferencing during hard register assignment.  The live
   range splitting is a byproduct of the regional register allocation.

   Major IRA notions are:

     o *Region* is a part of the CFG where graph coloring based on the
       Chaitin-Briggs algorithm is done.  IRA can work on any set of
       nested CFG regions forming a tree.  Currently the regions are
       the entire function for the root region and natural loops for
       the other regions.  Therefore the data structure representing a
       region is called loop_tree_node.

     o *Allocno class* is a register class used for allocation of a
       given allocno.  It means that only a hard register of the given
       register class can be assigned to the given allocno.  In
       reality, an even smaller subset of (*profitable*) hard
       registers can be assigned.  In rare cases, the subset can be
       even smaller because our modification of the Chaitin-Briggs
       algorithm requires that the sets of hard registers that can be
       assigned to allocnos form a forest, i.e. the sets can be
       ordered in a way where any previous set either does not
       intersect the given set or is a superset of it.
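
       As an illustration of the forest property (a minimal sketch,
       not the actual IRA code; hard_reg_set_intersect_p and
       hard_reg_set_subset_p are the real predicates used in this
       file), the requirement is that any two of the sets are either
       disjoint or nested:

         static bool
         sets_form_forest_p (const HARD_REG_SET *sets, int n)
         {
           for (int i = 0; i < n; i++)
             for (int j = i + 1; j < n; j++)
               if (hard_reg_set_intersect_p (sets[i], sets[j])
                   && ! hard_reg_set_subset_p (sets[i], sets[j])
                   && ! hard_reg_set_subset_p (sets[j], sets[i]))
                 return false;
           return true;
         }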
     o *Pressure class* is a register class belonging to a set of
       register classes containing all of the hard-registers available
       for register allocation.  The set of all pressure classes for a
       target is defined in the corresponding machine-description file
       according to some criteria.  Register pressure is calculated
       only for pressure classes and it affects some IRA decisions,
       such as forming allocation regions.

     o *Allocno* represents the live range of a pseudo-register in a
       region.  Besides the obvious attributes like the corresponding
       pseudo-register number, allocno class, conflicting allocnos and
       conflicting hard-registers, there are a few allocno attributes
       which are important for understanding the allocation algorithm:

       - *Live ranges*.  This is a list of ranges of *program points*
         where the allocno lives.  Program points represent places
         where a pseudo can be born or become dead (there are
         approximately two times more program points than the insns)
         and they are represented by integers starting with 0.  The
         live ranges are used to find conflicts between allocnos.
         They also play a very important role in the transformation of
         the IRA internal representation of several regions into a
         one-region representation.  The latter is used during the
         reload pass because each allocno then represents all of the
         corresponding pseudo-registers.
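
         A minimal sketch of the range-based conflict test, assuming
         simplified ranges kept sorted by increasing start point (the
         real representation lives in ira-int.h and ira-lives.c):

           struct range { int start, finish; struct range *next; };

           static bool
           ranges_intersect_p (struct range *r1, struct range *r2)
           {
             while (r1 != NULL && r2 != NULL)
               {
                 if (r1->finish < r2->start)
                   r1 = r1->next;
                 else if (r2->finish < r1->start)
                   r2 = r2->next;
                 else
                   return true;
               }
             return false;
           }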
       - *Hard-register costs*.  This is a vector of size equal to the
         number of available hard-registers of the allocno class.  The
         cost of a callee-clobbered hard-register for an allocno is
         increased by the cost of save/restore code around the calls
         through the given allocno's life.  If the allocno is a move
         instruction operand and another operand is a hard-register of
         the allocno class, the cost of the hard-register is decreased
         by the move cost.

         When an allocno is assigned, the hard-register with minimal
         full cost is used.  Initially, a hard-register's full cost is
         the corresponding value from the hard-register's cost vector.
         If the allocno is connected by a *copy* (see below) to
         another allocno which has just received a hard-register, the
         cost of the hard-register is decreased.  Before choosing a
         hard-register for an allocno, the allocno's current costs of
         the hard-registers are modified by the conflict hard-register
         costs of all of the conflicting allocnos which are not
         assigned yet.

       - *Conflict hard-register costs*.  This is a vector of the same
         size as the hard-register costs vector.  To permit an
         unassigned allocno to get a better hard-register, IRA uses
         this vector to calculate the final full cost of the
         available hard-registers.  Conflict hard-register costs of an
         unassigned allocno are also changed with a change of the
         hard-register cost of the allocno when a copy involving the
         allocno is processed as described above.  This is done to
         show other unassigned allocnos that a given allocno prefers
         some hard-registers in order to remove the move instruction
         corresponding to the copy.
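
         A minimal sketch of how the two vectors combine when a hard
         register is chosen (hypothetical array names; the real
         selection logic is in ira-color.c):

           int best = -1, best_cost = INT_MAX;
           for (int i = 0; i < class_size; i++)
             {
               int full_cost = hard_reg_costs[i] + conflict_costs[i];
               if (full_cost < best_cost)
                 {
                   best_cost = full_cost;
                   best = i;
                 }
             }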
     o *Cap*.  If a pseudo-register does not live in a region but
       lives in a nested region, IRA creates a special allocno called
       a cap in the outer region.  A region cap is also created for a
       subregion cap.

     o *Copy*.  Allocnos can be connected by copies.  Copies are used
       to modify hard-register costs for allocnos during coloring.
       Such modifications reflect a preference to use the same
       hard-register for the allocnos connected by copies.  Usually
       copies are created for move insns (in this case it results in
       register coalescing).  But IRA also creates copies for operands
       of an insn which should be assigned to the same hard-register
       due to constraints in the machine description (it usually
       results in removing a move generated in reload to satisfy
       the constraints) and copies referring to the allocno which is
       the output operand of an instruction and the allocno which is
       an input operand dying in the instruction (creation of such
       copies results in less register shuffling).  IRA *does not*
       create copies between allocnos for the same register from
       different regions because we use another technique for
       propagating hard-register preferences on the borders of
       regions.
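
       A sketch of the cost adjustment a copy causes (hypothetical
       names; in IRA the update is weighted by the copy's execution
       frequency): when the allocno on one side of a copy of
       frequency FREQ gets the hard register with index HR in the
       class, that register becomes cheaper for the other side:

         hard_reg_costs[hr] -= freq * move_cost;
         conflict_costs[hr] -= freq * move_cost;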
   Allocnos (including caps) for the upper region in the region tree
   *accumulate* information important for coloring from allocnos with
   the same pseudo-register from nested regions.  This includes
   hard-register and memory costs, conflicts with hard-registers,
   allocno conflicts, allocno copies and more.  *Thus, attributes for
   allocnos in a region have the same values as if the region had no
   subregions*.  It means that attributes for allocnos in the
   outermost region corresponding to the function have the same values
   as though the allocation used only one region which is the entire
   function.  It also means that we can look at IRA's work as if IRA
   first did the allocation for the entire function, then improved it
   for loops, then for their subloops, and so on.
   IRA major passes are:

     o Building IRA internal representation which consists of the
       following subpasses:

       * First, IRA builds regions and creates allocnos (file
         ira-build.c) and initializes most of their attributes.

       * Then IRA finds an allocno class for each allocno and
         calculates its initial (non-accumulated) cost of memory and
         each hard-register of its allocno class (file ira-cost.c).

       * IRA creates live ranges of each allocno, calculates register
         pressure for each pressure class in each region, sets up
         conflict hard registers for each allocno and info about calls
         the allocno lives through (file ira-lives.c).

       * IRA removes low register pressure loops from the regions
         mostly to speed IRA up (file ira-build.c).

       * IRA propagates accumulated allocno info from lower region
         allocnos to corresponding upper region allocnos (file
         ira-build.c).

       * IRA creates all caps (file ira-build.c).

       * Having live-ranges of allocnos and their classes, IRA creates
         conflicting allocnos for each allocno.  Conflicting allocnos
         are stored as a bit vector or an array of pointers to the
         conflicting allocnos, whichever is more profitable (file
         ira-conflicts.c).  At this point IRA creates allocno copies.
     o Coloring.  Now IRA has all the necessary info to start the
       graph coloring process.  It is done in each region on a
       top-down traversal of the region tree (file ira-color.c).
       There are the following subpasses:

       * Finding profitable hard registers of the corresponding
         allocno class for each allocno.  For example, only
         callee-saved hard registers are frequently profitable for
         allocnos living through calls.  If the profitable hard
         register set of an allocno does not form a tree based on the
         subset relation, we use some approximation to form the tree.
         This approximation is used to figure out trivial
         colorability of allocnos.  The approximation is a pretty
         rare case.

       * Putting allocnos onto the coloring stack.  IRA uses Briggs
         optimistic coloring which is a major improvement over
         Chaitin's coloring.  Therefore IRA does not spill allocnos at
         this point.  There is some freedom in the order of putting
         allocnos on the stack which can affect the final result of
         the allocation.  IRA uses some heuristics to improve the
         order; the major one is to form *threads* from colorable
         allocnos and push them on the stack by threads.  A thread is
         a set of non-conflicting colorable allocnos connected by
         copies.  The thread contains allocnos from the colorable
         bucket or colorable allocnos already pushed onto the coloring
         stack.  Pushing thread allocnos one after another onto the
         stack increases the chances of removing copies when the
         allocnos get the same hard reg (see the sketch below).
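
         A sketch of how threads are formed (hypothetical helpers;
         the real code is form_threads_from_copies in ira-color.c):
         threads start as single allocnos and are merged greedily
         along copies, the most frequent copies first, as long as the
         merged threads do not conflict:

           for each copy (a, b) in decreasing frequency order:
             if (thread_of (a) != thread_of (b)
                 && ! threads_conflict_p (thread_of (a), thread_of (b)))
               merge_threads (thread_of (a), thread_of (b));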
         We also use a modification of the Chaitin-Briggs algorithm
         which works for intersected register classes of allocnos.
         To figure out trivial colorability of allocnos, the
         above-mentioned tree of hard register sets is used.  To get
         an idea how the algorithm works on an i386 example, let us
         consider an allocno to which any general hard register can
         be assigned.  If the allocno conflicts with eight allocnos
         to which only the EAX register can be assigned, the given
         allocno is still trivially colorable because all the
         conflicting allocnos might be assigned only to EAX and all
         other general hard registers are still free.
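
         A simplified sketch of that trivial-colorability test
         (hypothetical parameters; the real test walks the tree of
         hard register sets): an allocno is trivially colorable if,
         even when every conflicting allocno takes its worst-case
         number of registers out of the allocno's profitable set,
         enough registers remain:

           static bool
           trivially_colorable_p (int n_profitable_regs,
                                  int nregs_needed,
                                  int worst_case_conflict_regs)
           {
             return (n_profitable_regs - worst_case_conflict_regs
                     >= nregs_needed);
           }

         In the i386 example above, the eight EAX-only conflicts can
         take at most one register out of the general register set,
         so the test succeeds.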
         To get an idea of the trivial colorability criterion used,
         it is also useful to read the article "Graph-Coloring
         Register Allocation for Irregular Architectures" by Michael
         D. Smith and Glenn Holloway.  The major difference between
         the article's approach and the approach used in IRA is that
         Smith's approach takes register classes only from the
         machine description while IRA calculates register classes
         from the intermediate code too (e.g. an explicit usage of
         hard registers in RTL code for parameter passing can result
         in the creation of additional register classes which contain
         or exclude the hard registers).  That makes the IRA approach
         useful for improving coloring even for architectures with
         regular register files, and in fact some benchmarking shows
         that the improvement for regular class architectures is even
         bigger than for irregular ones.  Another difference is that
         Smith's approach chooses the intersection of classes of all
         insn operands in which a given pseudo occurs.  IRA can use
         bigger classes if it is still more profitable than memory
         usage.
       * Popping the allocnos from the stack and assigning them hard
         registers.  If IRA cannot assign a hard register to an
         allocno and the allocno is coalesced, IRA undoes the
         coalescing and puts the uncoalesced allocnos onto the stack
         in the hope that some such allocnos will get a hard register
         separately.  If IRA fails to assign a hard register or
         memory is more profitable for the allocno, IRA spills it.
         IRA assigns the allocno the hard-register with minimal full
         allocation cost which reflects the cost of usage of the
         hard-register for the allocno and the cost of usage of the
         hard-register for allocnos conflicting with the given
         allocno.

       * Chaitin-Briggs coloring assigns as many pseudos as possible
         to hard registers.  After coloring we try to improve the
         allocation from the cost point of view.  We improve the
         allocation by spilling some allocnos and assigning the freed
         hard registers to other allocnos if it decreases the overall
         allocation cost.

       * After allocno assignment in the region, IRA modifies the
         hard register and memory costs for the corresponding
         allocnos in the subregions to reflect the cost of possible
         loads, stores, or moves on the border of the region and its
         subregions.  When the default regional allocation algorithm
         is used (-fira-algorithm=mixed), IRA just propagates the
         assignment for allocnos if the register pressure in the
         region for the corresponding pressure class is less than the
         number of available hard registers for the given pressure
         class.
     o Spill/restore code moving.  When IRA performs an allocation
       by traversing regions in top-down order, it does not know what
       happens below in the region tree.  Therefore, sometimes IRA
       misses opportunities to perform a better allocation.  A simple
       optimization tries to improve allocation in a region having
       subregions and contained in another region.  If the
       corresponding allocnos in the subregion are spilled, it spills
       the region allocno if it is profitable.  The optimization
       implements a simple iterative algorithm performing profitable
       transformations while they are still possible.  It is fast in
       practice, so there is no real need for a better time complexity
       algorithm.
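
       A sketch of the iterative shape of that optimization
       (hypothetical helpers):

         bool changed = true;
         while (changed)
           {
             changed = false;
             for each region allocno A whose subregion allocnos
               were spilled:
               if (spilling_region_allocno_is_profitable_p (A))
                 {
                   spill_region_allocno (A);
                   changed = true;
                 }
           }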
     o Code change.  After coloring, two allocnos representing the
       same pseudo-register outside and inside a region respectively
       may be assigned to different locations (hard-registers or
       memory).  In this case IRA creates and uses a new
       pseudo-register inside the region and adds code to move allocno
       values on the region's borders.  This is done during a top-down
       traversal of the regions (file ira-emit.c).  In some
       complicated cases IRA can create a new allocno to move allocno
       values (e.g. when a swap of values stored in two hard-registers
       is needed).  At this stage, the new allocno is marked as
       spilled.  IRA still creates the pseudo-register and the moves
       on the region borders even when both allocnos were assigned to
       the same hard-register.  If the reload pass spills a
       pseudo-register for some reason, the effect will be smaller
       because another allocno will still be in the hard-register.  In
       most cases, this is better than spilling both allocnos.  If
       reload does not change the allocation for the two
       pseudo-registers, the trivial move will be removed by
       post-reload optimizations.  IRA does not generate moves for
       allocnos assigned to the same hard register when the default
       regional allocation algorithm is used and the register pressure
       in the region for the corresponding pressure class is less than
       the number of available hard registers for the given pressure
       class.  IRA also does some optimizations to remove redundant
       stores and to reduce code duplication on the region borders.
     o Flattening internal representation.  After changing code, IRA
       transforms its internal representation for several regions into
       a one-region representation (file ira-build.c).  This process
       is called IR flattening.  Such a process is more complicated
       than IR rebuilding would be, but is much faster.

     o After IR flattening, IRA tries to assign hard registers to all
       spilled allocnos.  This is implemented by a simple and fast
       priority coloring algorithm (see function
       ira_reassign_conflict_allocnos::ira-color.c).  Here new
       allocnos created during the code change pass can be assigned
       to hard registers.

     o At the end IRA calls the reload pass.  The reload pass
       communicates with IRA through several functions in file
       ira-color.c to improve its decisions in

       * sharing stack slots for the spilled pseudos based on IRA info
         about pseudo-register conflicts.

       * reassigning hard-registers to all spilled pseudos at the end
         of each reload iteration.

       * choosing a better hard-register to spill based on IRA info
         about pseudo-register live ranges and the register pressure
         in places where the pseudo-register lives.

   IRA uses a lot of data representing the target processors.  These
   data are initialized in file ira.c.

   If the function has no loops (or the loops are ignored when
   -fira-algorithm=CB is used), we have classic Chaitin-Briggs
   coloring (only instead of a separate pass of coalescing, we use
   hard register preferencing).  In such a case, IRA works much
   faster because many things are not done (like IR flattening, the
   spill/restore optimization, and the code change).
   Literature worth reading for a better understanding of the code:

     o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements
       to Graph Coloring Register Allocation.

     o David Callahan, Brian Koblenz.  Register allocation via
       hierarchical graph coloring.

     o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt.  Revisiting
       Graph Coloring Register Allocation: A Study of the
       Chaitin-Briggs and Callahan-Koblenz Algorithms.

     o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai.
       Global Register Allocation Based on Graph Fusion.

     o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
       Allocation for Irregular Architectures.

     o Vladimir Makarov.  The Integrated Register Allocator for GCC.

     o Vladimir Makarov.  The top-down register allocator for
       irregular register file architectures.

*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "tree-pass.h"
#include "output.h"
#include "reload.h"
#include "cfgloop.h"
#include "lra.h"
#include "dce.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
#include "shrink-wrap.h"
#include "print-rtl.h"

struct target_ira default_target_ira;
struct target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
struct target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif
/* A modified value of flag `-fira-verbose' used internally.  */
int internal_flag_ira_verbose;

/* Dump file of the allocator if it is not NULL.  */
FILE *ira_dump_file;

/* The number of elements in the following array.  */
int ira_spilled_reg_stack_slots_num;

/* The following array contains info about spilled pseudo-registers
   stack slots used in the current function so far.  */
struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;

/* Correspondingly: overall cost of the allocation, overall cost
   before reload, cost of the allocnos assigned to hard-registers,
   cost of the allocnos assigned to memory, and cost of loads, stores
   and register move insns generated for pseudo-register live range
   splitting (see ira-emit.c).  */
int64_t ira_overall_cost, overall_cost_before;
int64_t ira_reg_cost, ira_mem_cost;
int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
int ira_move_loops_num, ira_additional_jumps_num;

/* All registers that can be eliminated.  */

HARD_REG_SET eliminable_regset;

/* Value of max_reg_num () before IRA work starts.  This value helps
   us to recognize a situation when new pseudos were created during
   IRA work.  */
static int max_regno_before_ira;

/* Temporary hard reg set used for a different calculation.  */
static HARD_REG_SET temp_hard_regset;

#define last_mode_for_init_move_cost \
  (this_target_ira_int->x_last_mode_for_init_move_cost)
/* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
static void
setup_reg_mode_hard_regset (void)
{
  int i, m, hard_regno;

  for (m = 0; m < NUM_MACHINE_MODES; m++)
    for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
      {
	CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
	for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
	  if (hard_regno + i < FIRST_PSEUDO_REGISTER)
	    SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
			      hard_regno + i);
      }
}
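
/* For example, on a target where a value of some mode M held in hard
   register 0 occupies two hard registers,
   ira_reg_mode_hard_regset[0][M] contains hard registers 0 and 1.  */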
#define no_unit_alloc_regs \
  (this_target_ira_int->x_no_unit_alloc_regs)

/* The function sets up the three arrays declared above.  */
static void
setup_class_hard_regs (void)
{
  int cl, i, hard_regno, n;
  HARD_REG_SET processed_hard_reg_set;

  ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      CLEAR_HARD_REG_SET (processed_hard_reg_set);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  ira_non_ordered_class_hard_regs[cl][i] = -1;
	  ira_class_hard_reg_index[cl][i] = -1;
	}
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
#ifdef REG_ALLOC_ORDER
	  hard_regno = reg_alloc_order[i];
#else
	  hard_regno = i;
#endif
	  if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
	    continue;
	  SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
	  if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
	    ira_class_hard_reg_index[cl][hard_regno] = -1;
	  else
	    {
	      ira_class_hard_reg_index[cl][hard_regno] = n;
	      ira_class_hard_regs[cl][n++] = hard_regno;
	    }
	}
      ira_class_hard_regs_num[cl] = n;
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (temp_hard_regset, i))
	  ira_non_ordered_class_hard_regs[cl][n++] = i;
      ira_assert (ira_class_hard_regs_num[cl] == n);
    }
}
/* Set up global variables defining info about hard registers for the
   allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
   that we can use the hard frame pointer for the allocation.  */
static void
setup_alloc_regs (bool use_hard_frame_p)
{
#ifdef ADJUST_REG_ALLOC_ORDER
  ADJUST_REG_ALLOC_ORDER;
#endif
  COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_reg_set);
  if (! use_hard_frame_p)
    SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
  setup_class_hard_regs ();
}

#define alloc_reg_class_subclasses \
  (this_target_ira_int->x_alloc_reg_class_subclasses)

/* Initialize the table of subclasses of each reg class.  */
static void
setup_reg_subclasses (void)
{
  int i, j;
  HARD_REG_SET temp_hard_regset2;

  for (i = 0; i < N_REG_CLASSES; i++)
    for (j = 0; j < N_REG_CLASSES; j++)
      alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      if (i == (int) NO_REGS)
	continue;

      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      if (hard_reg_set_empty_p (temp_hard_regset))
	continue;
      for (j = 0; j < N_REG_CLASSES; j++)
	if (i != j)
	  {
	    enum reg_class *p;

	    COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
	    AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	    if (! hard_reg_set_subset_p (temp_hard_regset,
					 temp_hard_regset2))
	      continue;
	    p = &alloc_reg_class_subclasses[j][0];
	    while (*p != LIM_REG_CLASSES) p++;
	    *p = (enum reg_class) i;
	  }
    }
}
/* Set up IRA_MEMORY_MOVE_COST and IRA_MAX_MEMORY_MOVE_COST.  */
static void
setup_class_subset_and_memory_move_costs (void)
{
  int cl, cl2, mode, cost;
  HARD_REG_SET temp_hard_regset2;

  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    ira_memory_move_cost[mode][NO_REGS][0]
      = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      if (cl != (int) NO_REGS)
	for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
	  {
	    ira_max_memory_move_cost[mode][cl][0]
	      = ira_memory_move_cost[mode][cl][0]
	      = memory_move_cost ((machine_mode) mode,
				  (reg_class_t) cl, false);
	    ira_max_memory_move_cost[mode][cl][1]
	      = ira_memory_move_cost[mode][cl][1]
	      = memory_move_cost ((machine_mode) mode,
				  (reg_class_t) cl, true);
	    /* Costs for NO_REGS are used in cost calculation on the
	       1st pass when the preferred register classes are not
	       known yet.  In this case we take the best scenario.  */
	    if (ira_memory_move_cost[mode][NO_REGS][0]
		> ira_memory_move_cost[mode][cl][0])
	      ira_max_memory_move_cost[mode][NO_REGS][0]
		= ira_memory_move_cost[mode][NO_REGS][0]
		= ira_memory_move_cost[mode][cl][0];
	    if (ira_memory_move_cost[mode][NO_REGS][1]
		> ira_memory_move_cost[mode][cl][1])
	      ira_max_memory_move_cost[mode][NO_REGS][1]
		= ira_memory_move_cost[mode][NO_REGS][1]
		= ira_memory_move_cost[mode][cl][1];
	  }
    }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
      {
	COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
	AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	ira_class_subset_p[cl][cl2]
	  = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
	if (! hard_reg_set_empty_p (temp_hard_regset2)
	    && hard_reg_set_subset_p (reg_class_contents[cl2],
				      reg_class_contents[cl]))
	  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
	    {
	      cost = ira_memory_move_cost[mode][cl2][0];
	      if (cost > ira_max_memory_move_cost[mode][cl][0])
		ira_max_memory_move_cost[mode][cl][0] = cost;
	      cost = ira_memory_move_cost[mode][cl2][1];
	      if (cost > ira_max_memory_move_cost[mode][cl][1])
		ira_max_memory_move_cost[mode][cl][1] = cost;
	    }
      }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
      {
	ira_memory_move_cost[mode][cl][0]
	  = ira_max_memory_move_cost[mode][cl][0];
	ira_memory_move_cost[mode][cl][1]
	  = ira_max_memory_move_cost[mode][cl][1];
      }
  setup_reg_subclasses ();
}
/* Define the following macro if allocation through malloc is
   preferable.  */
#define IRA_NO_OBSTACK

#ifndef IRA_NO_OBSTACK
/* Obstack used for storing all dynamic data (except bitmaps) of the
   IRA.  */
static struct obstack ira_obstack;
#endif

/* Obstack used for storing all bitmaps of the IRA.  */
static struct bitmap_obstack ira_bitmap_obstack;

/* Allocate memory of size LEN for IRA data.  */
void *
ira_allocate (size_t len)
{
  void *res;

#ifndef IRA_NO_OBSTACK
  res = obstack_alloc (&ira_obstack, len);
#else
  res = xmalloc (len);
#endif
  return res;
}

/* Free memory ADDR allocated for IRA data.  */
void
ira_free (void *addr ATTRIBUTE_UNUSED)
{
#ifndef IRA_NO_OBSTACK
  /* do nothing */
#else
  free (addr);
#endif
}
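
/* Example usage of the two functions above (a hypothetical sketch):

     ira_allocno_t *vec
       = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * n);
     ... use vec ...
     ira_free (vec);  */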
/* Allocate and return a bitmap for IRA.  */
bitmap
ira_allocate_bitmap (void)
{
  return BITMAP_ALLOC (&ira_bitmap_obstack);
}

/* Free bitmap B allocated for IRA.  */
void
ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
{
  /* do nothing */
}

/* Output information about allocation of all allocnos (except for
   caps) into file F.  */
void
ira_print_disposition (FILE *f)
{
  int i, n, max_regno;
  ira_allocno_t a;
  basic_block bb;

  fprintf (f, "Disposition:");
  max_regno = max_reg_num ();
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	if (n % 4 == 0)
	  fprintf (f, "\n");
	n++;
	fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
	  fprintf (f, "b%-3d", bb->index);
	else
	  fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
	if (ALLOCNO_HARD_REGNO (a) >= 0)
	  fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
	else
	  fprintf (f, " mem");
      }
  fprintf (f, "\n");
}

/* Output information about allocation of all allocnos into
   stderr.  */
void
ira_debug_disposition (void)
{
  ira_print_disposition (stderr);
}
/* Set up ira_stack_reg_pressure_class which is the biggest pressure
   register class containing stack registers or NO_REGS if there are
   no stack registers.  To find this class, we iterate through all
   register pressure classes and choose the first register pressure
   class containing all the stack registers and having the biggest
   size.  */
static void
setup_stack_reg_pressure_class (void)
{
  ira_stack_reg_pressure_class = NO_REGS;
#ifdef STACK_REGS
  {
    int i, best, size;
    enum reg_class cl;
    HARD_REG_SET temp_hard_regset2;

    CLEAR_HARD_REG_SET (temp_hard_regset);
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      SET_HARD_REG_BIT (temp_hard_regset, i);
    best = 0;
    for (i = 0; i < ira_pressure_classes_num; i++)
      {
	cl = ira_pressure_classes[i];
	COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
	AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	size = hard_reg_set_size (temp_hard_regset2);
	if (best < size)
	  {
	    best = size;
	    ira_stack_reg_pressure_class = cl;
	  }
      }
  }
#endif
}
/* Find pressure classes which are register classes for which we
   calculate register pressure in IRA, register pressure sensitive
   insn scheduling, and register pressure sensitive loop invariant
   motion.

   To make register pressure calculation easy, we always use
   non-intersected register pressure classes.  A move between hard
   registers of a register pressure class is not more expensive than
   a load and store of the hard registers.  Most likely an allocno
   class will be a subset of a register pressure class, and in many
   cases it will itself be a register pressure class.  That makes
   usage of register pressure classes a good approximation for
   finding high register pressure.  */
static void
setup_pressure_classes (void)
{
  int cost, i, n, curr;
  int cl, cl2;
  enum reg_class pressure_classes[N_REG_CLASSES];
  int m;
  HARD_REG_SET temp_hard_regset2;
  bool insert_p;

  n = 0;
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      if (ira_class_hard_regs_num[cl] == 0)
	continue;
      if (ira_class_hard_regs_num[cl] != 1
	  /* A register class without subclasses may contain a few
	     hard registers and movement between them is costly
	     (e.g. SPARC FPCC registers).  We still should consider it
	     as a candidate for a pressure class.  */
	  && alloc_reg_class_subclasses[cl][0] < cl)
	{
	  /* Check that the moves between any hard registers of the
	     current class are not more expensive for a legal mode
	     than load/store of the hard registers of the current
	     class.  Such a class is a potential candidate to be a
	     register pressure class.  */
	  for (m = 0; m < NUM_MACHINE_MODES; m++)
	    {
	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset,
				      ira_prohibited_class_mode_regs[cl][m]);
	      if (hard_reg_set_empty_p (temp_hard_regset))
		continue;
	      ira_init_register_move_cost_if_necessary ((machine_mode) m);
	      cost = ira_register_move_cost[m][cl][cl];
	      if (cost <= ira_max_memory_move_cost[m][cl][1]
		  || cost <= ira_max_memory_move_cost[m][cl][0])
		break;
	    }
	  if (m >= NUM_MACHINE_MODES)
	    continue;
	}
      curr = 0;
      insert_p = true;
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      /* Remove so far added pressure classes which are subsets of the
	 current candidate class.  Prefer GENERAL_REGS as a pressure
	 register class to another class containing the same
	 allocatable hard registers.  We do this because machine
	 dependent cost hooks might give wrong costs for the latter
	 class but always give the right cost for the former class
	 (GENERAL_REGS).  */
      for (i = 0; i < n; i++)
	{
	  cl2 = pressure_classes[i];
	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	  if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
	      && (! hard_reg_set_equal_p (temp_hard_regset, temp_hard_regset2)
		  || cl2 == (int) GENERAL_REGS))
	    {
	      pressure_classes[curr++] = (enum reg_class) cl2;
	      insert_p = false;
	      continue;
	    }
	  if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
	      && (! hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset)
		  || cl == (int) GENERAL_REGS))
	    continue;
	  if (hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset))
	    insert_p = false;
	  pressure_classes[curr++] = (enum reg_class) cl2;
	}
      /* If the current candidate is a subset of a so far added
	 pressure class, don't add it to the list of the pressure
	 classes.  */
      if (insert_p)
	pressure_classes[curr++] = (enum reg_class) cl;
      n = curr;
    }
#ifdef ENABLE_IRA_CHECKING
  {
    HARD_REG_SET ignore_hard_regs;

    /* Check pressure classes correctness: here we check that hard
       registers from all register pressure classes contain all hard
       registers available for the allocation.  */
    CLEAR_HARD_REG_SET (temp_hard_regset);
    CLEAR_HARD_REG_SET (temp_hard_regset2);
    COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
    for (cl = 0; cl < LIM_REG_CLASSES; cl++)
      {
	/* For some targets (like MIPS with MD_REGS), there are some
	   classes with hard registers available for allocation but
	   not able to hold a value of any mode.  */
	for (m = 0; m < NUM_MACHINE_MODES; m++)
	  if (contains_reg_of_mode[cl][m])
	    break;
	if (m >= NUM_MACHINE_MODES)
	  {
	    IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
	    continue;
	  }
	for (i = 0; i < n; i++)
	  if ((int) pressure_classes[i] == cl)
	    break;
	IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	if (i < n)
	  IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      }
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      /* Some targets (like SPARC with ICC reg) have allocatable regs
	 for which no reg class is defined.  */
      if (REGNO_REG_CLASS (i) == NO_REGS)
	SET_HARD_REG_BIT (ignore_hard_regs, i);
    AND_COMPL_HARD_REG_SET (temp_hard_regset, ignore_hard_regs);
    AND_COMPL_HARD_REG_SET (temp_hard_regset2, ignore_hard_regs);
    ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
  }
#endif
  ira_pressure_classes_num = 0;
  for (i = 0; i < n; i++)
    {
      cl = (int) pressure_classes[i];
      ira_reg_pressure_class_p[cl] = true;
      ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
    }
  setup_stack_reg_pressure_class ();
}
/* Set up IRA_UNIFORM_CLASS_P.  A uniform class is a register class
   whose register move cost between any registers of the class is the
   same as for all its subclasses.  We use the data to speed up the
   2nd pass of calculations of allocno costs.  */
static void
setup_uniform_class_p (void)
{
  int i, cl, cl2, m;

  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      ira_uniform_class_p[cl] = false;
      if (ira_class_hard_regs_num[cl] == 0)
	continue;
      /* We cannot use alloc_reg_class_subclasses here because move
	 cost hooks do not take into account that some registers are
	 unavailable for the subtarget.  E.g. for i686, INT_SSE_REGS
	 is an element of alloc_reg_class_subclasses for GENERAL_REGS
	 because SSE regs are unavailable.  */
      for (i = 0; (cl2 = reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; i++)
	{
	  if (ira_class_hard_regs_num[cl2] == 0)
	    continue;
	  for (m = 0; m < NUM_MACHINE_MODES; m++)
	    if (contains_reg_of_mode[cl][m] && contains_reg_of_mode[cl2][m])
	      {
		ira_init_register_move_cost_if_necessary ((machine_mode) m);
		if (ira_register_move_cost[m][cl][cl]
		    != ira_register_move_cost[m][cl2][cl2])
		  break;
	      }
	  if (m < NUM_MACHINE_MODES)
	    break;
	}
      if (cl2 == LIM_REG_CLASSES)
	ira_uniform_class_p[cl] = true;
    }
}
/* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
   IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.

   A target may have many subtargets and not all target hard
   registers can be used for allocation, e.g. the x86 port in 32-bit
   mode cannot use hard registers introduced in x86-64 like r8-r15.
   Some classes might have the same allocatable hard registers,
   e.g. INDEX_REGS and GENERAL_REGS in the x86 port in 32-bit mode.
   To decrease the effort of different calculations we introduce
   allocno classes which contain unique non-empty sets of allocatable
   hard-registers.

   Pseudo class cost calculation in ira-costs.c is very expensive.
   Therefore we are trying to decrease the number of classes involved
   in such calculation.  Register classes used in the cost
   calculation are called important classes.  They are allocno
   classes and other non-empty classes whose allocatable hard
   register sets are inside of an allocno class hard register set.
   At first sight, it looks like they are just the allocno classes.
   That is not true.  In the example of the x86 port in 32-bit mode,
   allocno classes will contain GENERAL_REGS but not LEGACY_REGS
   (because the allocatable hard registers are the same for both
   classes).  The important classes will contain GENERAL_REGS and
   LEGACY_REGS.  This is done because a machine description insn
   constraint may refer to LEGACY_REGS and the code in ira-costs.c is
   mostly based on investigation of the insn constraints.  */
static void
setup_allocno_and_important_classes (void)
{
  int i, j, n, cl;
  bool set_p;
  HARD_REG_SET temp_hard_regset2;
  static enum reg_class classes[LIM_REG_CLASSES + 1];

  n = 0;
  /* Collect classes which contain unique sets of allocatable hard
     registers.  Prefer GENERAL_REGS to other classes containing the
     same set of hard registers.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      for (j = 0; j < n; j++)
	{
	  cl = classes[j];
	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset2,
				  no_unit_alloc_regs);
	  if (hard_reg_set_equal_p (temp_hard_regset,
				    temp_hard_regset2))
	    break;
	}
      if (j >= n)
	classes[n++] = (enum reg_class) i;
      else if (i == GENERAL_REGS)
	/* Prefer general regs.  For the i386 example, it means that
	   we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
	   (all of them consist of the same available hard
	   registers).  */
	classes[j] = (enum reg_class) i;
    }
  classes[n] = LIM_REG_CLASSES;

  /* Set up classes which can be used for allocnos as classes
     containing non-empty unique sets of allocatable hard
     registers.  */
  ira_allocno_classes_num = 0;
  for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
    if (ira_class_hard_regs_num[cl] > 0)
      ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
  ira_important_classes_num = 0;
  /* Add non-allocno classes containing a non-empty set of
     allocatable hard regs.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    if (ira_class_hard_regs_num[cl] > 0)
      {
	COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	set_p = false;
	for (j = 0; j < ira_allocno_classes_num; j++)
	  {
	    COPY_HARD_REG_SET (temp_hard_regset2,
			       reg_class_contents[ira_allocno_classes[j]]);
	    AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
	    if ((enum reg_class) cl == ira_allocno_classes[j])
	      break;
	    else if (hard_reg_set_subset_p (temp_hard_regset,
					    temp_hard_regset2))
	      set_p = true;
	  }
	if (set_p && j >= ira_allocno_classes_num)
	  ira_important_classes[ira_important_classes_num++]
	    = (enum reg_class) cl;
      }
  /* Now add allocno classes to the important classes.  */
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_important_classes[ira_important_classes_num++]
      = ira_allocno_classes[j];
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      ira_reg_allocno_class_p[cl] = false;
      ira_reg_pressure_class_p[cl] = false;
    }
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
  setup_pressure_classes ();
  setup_uniform_class_p ();
}
/* Set up the translation in CLASS_TRANSLATE of all classes into a
   class given by the array CLASSES of length CLASSES_NUM.  The
   function is used to translate any reg class to an allocno class or
   to a pressure class.  This translation is necessary for some
   calculations when we can use only allocno or pressure classes and
   such translation represents an approximate representation of all
   classes.

   The translation in the case when the allocatable hard register set
   of a given class is a subset of the allocatable hard register set
   of a class in CLASSES is pretty simple.  We use the smallest class
   from CLASSES containing the given class.  If the allocatable hard
   register set of a given class is not a subset of any corresponding
   set of a class from CLASSES, we use the cheapest (from the
   load/store point of view) class from CLASSES whose set intersects
   with the given class set.  */
static void
setup_class_translate_array (enum reg_class *class_translate,
			     int classes_num, enum reg_class *classes)
{
  int cl, mode;
  enum reg_class aclass, best_class, *cl_ptr;
  int i, cost, min_cost, best_cost;

  for (cl = 0; cl < N_REG_CLASSES; cl++)
    class_translate[cl] = NO_REGS;

  for (i = 0; i < classes_num; i++)
    {
      aclass = classes[i];
      for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
	   (cl = *cl_ptr) != LIM_REG_CLASSES;
	   cl_ptr++)
	if (class_translate[cl] == NO_REGS)
	  class_translate[cl] = aclass;
      class_translate[aclass] = aclass;
    }
  /* For classes which are not fully covered by one of the given
     classes (in other words covered by more than one given class),
     use the cheapest class.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      if (cl == NO_REGS || class_translate[cl] != NO_REGS)
	continue;
      best_class = NO_REGS;
      best_cost = INT_MAX;
      for (i = 0; i < classes_num; i++)
	{
	  aclass = classes[i];
	  COPY_HARD_REG_SET (temp_hard_regset,
			     reg_class_contents[aclass]);
	  AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	  if (! hard_reg_set_empty_p (temp_hard_regset))
	    {
	      min_cost = INT_MAX;
	      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
		{
		  cost = (ira_memory_move_cost[mode][aclass][0]
			  + ira_memory_move_cost[mode][aclass][1]);
		  if (min_cost > cost)
		    min_cost = cost;
		}
	      if (best_class == NO_REGS || best_cost > min_cost)
		{
		  best_class = aclass;
		  best_cost = min_cost;
		}
	    }
	}
      class_translate[cl] = best_class;
    }
}

/* Set up the arrays IRA_ALLOCNO_CLASS_TRANSLATE and
   IRA_PRESSURE_CLASS_TRANSLATE.  */
static void
setup_class_translate (void)
{
  setup_class_translate_array (ira_allocno_class_translate,
			       ira_allocno_classes_num, ira_allocno_classes);
  setup_class_translate_array (ira_pressure_class_translate,
			       ira_pressure_classes_num, ira_pressure_classes);
}
/* Order numbers of allocno classes in the original target allocno
   class array, -1 for non-allocno classes.  */
static int allocno_class_order[N_REG_CLASSES];

/* The function used to sort the important classes.  */
static int
comp_reg_classes_func (const void *v1p, const void *v2p)
{
  enum reg_class cl1 = *(const enum reg_class *) v1p;
  enum reg_class cl2 = *(const enum reg_class *) v2p;
  enum reg_class tcl1, tcl2;
  int diff;

  tcl1 = ira_allocno_class_translate[cl1];
  tcl2 = ira_allocno_class_translate[cl2];
  if (tcl1 != NO_REGS && tcl2 != NO_REGS
      && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
    return diff;
  return (int) cl1 - (int) cl2;
}

/* For correct work of function setup_reg_class_relations we need to
   reorder important classes according to the order of their allocno
   classes.  It places important classes containing the same
   allocatable hard register set adjacent to each other, and the
   allocno class with that allocatable hard register set right after
   the other important classes with the same set.

   In the example from the comments of function
   setup_allocno_and_important_classes, it places LEGACY_REGS and
   GENERAL_REGS close to each other and GENERAL_REGS is after
   LEGACY_REGS.  */
static void
reorder_important_classes (void)
{
  int i;

  for (i = 0; i < N_REG_CLASSES; i++)
    allocno_class_order[i] = -1;
  for (i = 0; i < ira_allocno_classes_num; i++)
    allocno_class_order[ira_allocno_classes[i]] = i;
  qsort (ira_important_classes, ira_important_classes_num,
	 sizeof (enum reg_class), comp_reg_classes_func);
  for (i = 0; i < ira_important_classes_num; i++)
    ira_important_class_nums[ira_important_classes[i]] = i;
}
/* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
   IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
   IRA_REG_CLASSES_INTERSECT_P.  For the meaning of the relations,
   please see the corresponding comments in ira-int.h.  */
static void
setup_reg_class_relations (void)
{
  int i, cl1, cl2, cl3;
  HARD_REG_SET intersection_set, union_set, temp_set2;
  bool important_class_p[N_REG_CLASSES];

  memset (important_class_p, 0, sizeof (important_class_p));
  for (i = 0; i < ira_important_classes_num; i++)
    important_class_p[ira_important_classes[i]] = true;
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    {
      ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
      for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
	{
	  ira_reg_classes_intersect_p[cl1][cl2] = false;
	  ira_reg_class_intersect[cl1][cl2] = NO_REGS;
	  ira_reg_class_subset[cl1][cl2] = NO_REGS;
	  COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	  COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
	  if (hard_reg_set_empty_p (temp_hard_regset)
	      && hard_reg_set_empty_p (temp_set2))
	    {
	      /* Both classes have no allocatable hard registers
		 -- take all class hard registers into account and use
		 reg_class_subunion and reg_class_superunion.  */
	      for (i = 0;; i++)
		{
		  cl3 = reg_class_subclasses[cl1][i];
		  if (cl3 == LIM_REG_CLASSES)
		    break;
		  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
					  (enum reg_class) cl3))
		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
		}
	      ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
	      ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2];
	      continue;
	    }
	  ira_reg_classes_intersect_p[cl1][cl2]
	    = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
	  if (important_class_p[cl1] && important_class_p[cl2]
	      && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
	    {
	      /* CL1 and CL2 are important classes and the CL1
		 allocatable hard register set is inside of the CL2
		 allocatable hard registers -- make CL1 a superset of
		 CL2.  */
	      enum reg_class *p;

	      p = &ira_reg_class_super_classes[cl1][0];
	      while (*p != LIM_REG_CLASSES)
		p++;
	      *p++ = (enum reg_class) cl2;
	      *p = LIM_REG_CLASSES;
	    }
	  ira_reg_class_subunion[cl1][cl2] = NO_REGS;
	  ira_reg_class_superunion[cl1][cl2] = NO_REGS;
	  COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
	  AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
	  COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
	  IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
	  AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
	  for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
	    {
	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
	      if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
		{
		  /* CL3 allocatable hard register set is inside of
		     the intersection of allocatable hard register
		     sets of CL1 and CL2.  */
		  if (important_class_p[cl3])
		    {
		      COPY_HARD_REG_SET
			(temp_set2,
			 reg_class_contents
			 [(int) ira_reg_class_intersect[cl1][cl2]]);
		      AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		      if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
			  /* If the allocatable hard register sets are
			     the same, prefer GENERAL_REGS or the
			     smallest class for debugging
			     purposes.  */
			  || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
			      && (cl3 == GENERAL_REGS
				  || ((ira_reg_class_intersect[cl1][cl2]
				       != GENERAL_REGS)
				      && hard_reg_set_subset_p
					 (reg_class_contents[cl3],
					  reg_class_contents
					  [(int)
					   ira_reg_class_intersect[cl1][cl2]])))))
			ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
		    }
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int) ira_reg_class_subset[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
		      /* Ignore unavailable hard registers and prefer
			 the smallest class for debugging purposes.  */
		      || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
			  && hard_reg_set_subset_p
			     (reg_class_contents[cl3],
			      reg_class_contents
			      [(int) ira_reg_class_subset[cl1][cl2]])))
		    ira_reg_class_subset[cl1][cl2] = (enum reg_class) cl3;
		}
	      if (important_class_p[cl3]
		  && hard_reg_set_subset_p (temp_hard_regset, union_set))
		{
		  /* CL3 allocatable hard register set is inside of
		     the union of allocatable hard register sets of
		     CL1 and CL2.  */
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int) ira_reg_class_subunion[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
		      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
			  && (! hard_reg_set_equal_p (temp_set2,
						      temp_hard_regset)
			      || cl3 == GENERAL_REGS
			      /* If the allocatable hard register sets are the
				 same, prefer GENERAL_REGS or the smallest
				 class for debugging purposes.  */
			      || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS
				  && hard_reg_set_subset_p
				     (reg_class_contents[cl3],
				      reg_class_contents
				      [(int) ira_reg_class_subunion[cl1][cl2]])))))
		    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
		}
	      if (hard_reg_set_subset_p (union_set, temp_hard_regset))
		{
		  /* CL3 allocatable hard register set contains the
		     union of allocatable hard register sets of CL1
		     and CL2.  */
		  COPY_HARD_REG_SET
		    (temp_set2,
		     reg_class_contents[(int) ira_reg_class_superunion[cl1][cl2]]);
		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
		  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
		      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
			  && (! hard_reg_set_equal_p (temp_set2,
						      temp_hard_regset)
			      || cl3 == GENERAL_REGS
			      /* If the allocatable hard register sets are the
				 same, prefer GENERAL_REGS or the smallest
				 class for debugging purposes.  */
			      || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS
				  && hard_reg_set_subset_p
				     (reg_class_contents[cl3],
				      reg_class_contents
				      [(int) ira_reg_class_superunion[cl1][cl2]])))))
		    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
		}
	    }
	}
    }
}
/* Output all uniform and important classes into file F.  */
static void
print_uniform_and_important_classes (FILE *f)
{
  int i, cl;

  fprintf (f, "Uniform classes:\n");
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    if (ira_uniform_class_p[cl])
      fprintf (f, " %s", reg_class_names[cl]);
  fprintf (f, "\nImportant classes:\n");
  for (i = 0; i < ira_important_classes_num; i++)
    fprintf (f, " %s", reg_class_names[ira_important_classes[i]]);
  fprintf (f, "\n");
}

/* Output all possible allocno or pressure classes and their
   translation map into file F.  */
static void
print_translated_classes (FILE *f, bool pressure_p)
{
  int classes_num = (pressure_p
		     ? ira_pressure_classes_num : ira_allocno_classes_num);
  enum reg_class *classes = (pressure_p
			     ? ira_pressure_classes : ira_allocno_classes);
  enum reg_class *class_translate = (pressure_p
				     ? ira_pressure_class_translate
				     : ira_allocno_class_translate);
  int i;

  fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
  for (i = 0; i < classes_num; i++)
    fprintf (f, " %s", reg_class_names[classes[i]]);
  fprintf (f, "\nClass translation:\n");
  for (i = 0; i < N_REG_CLASSES; i++)
    fprintf (f, " %s -> %s\n", reg_class_names[i],
	     reg_class_names[class_translate[i]]);
}

/* Output all possible allocno and translation classes and the
   translation maps into stderr.  */
void
ira_debug_allocno_classes (void)
{
  print_uniform_and_important_classes (stderr);
  print_translated_classes (stderr, false);
  print_translated_classes (stderr, true);
}

/* Set up different arrays concerning class subsets, allocno and
   important classes.  */
static void
find_reg_classes (void)
{
  setup_allocno_and_important_classes ();
  setup_class_translate ();
  reorder_important_classes ();
  setup_reg_class_relations ();
}
/* Set up the array above.  */
static void
setup_hard_regno_aclass (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
#if 1
      ira_hard_regno_allocno_class[i]
	= (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
	   ? NO_REGS
	   : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
#else
      int j;
      enum reg_class cl;
      ira_hard_regno_allocno_class[i] = NO_REGS;
      for (j = 0; j < ira_allocno_classes_num; j++)
	{
	  cl = ira_allocno_classes[j];
	  if (ira_class_hard_reg_index[cl][i] >= 0)
	    {
	      ira_hard_regno_allocno_class[i] = cl;
	      break;
	    }
	}
#endif
    }
}
/* Form the IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
static void
setup_reg_class_nregs (void)
{
  int i, cl, cl2, m;

  for (m = 0; m < MAX_MACHINE_MODE; m++)
    {
      for (cl = 0; cl < N_REG_CLASSES; cl++)
	ira_reg_class_max_nregs[cl][m]
	  = ira_reg_class_min_nregs[cl][m]
	  = targetm.class_max_nregs ((reg_class_t) cl, (machine_mode) m);
      for (cl = 0; cl < N_REG_CLASSES; cl++)
	for (i = 0;
	     (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
	     i++)
	  if (ira_reg_class_min_nregs[cl2][m]
	      < ira_reg_class_min_nregs[cl][m])
	    ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
    }
}
1484 /* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON.
1485 This function is called once IRA_CLASS_HARD_REGS has been initialized. */
1486 static void
1487 setup_prohibited_class_mode_regs (void)
1489 int j, k, hard_regno, cl, last_hard_regno, count;
1491 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1493 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
1494 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1495 for (j = 0; j < NUM_MACHINE_MODES; j++)
1497 count = 0;
1498 last_hard_regno = -1;
1499 CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
1500 for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1502 hard_regno = ira_class_hard_regs[cl][k];
1503 if (! HARD_REGNO_MODE_OK (hard_regno, (machine_mode) j))
1504 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1505 hard_regno);
1506 else if (in_hard_reg_set_p (temp_hard_regset,
1507 (machine_mode) j, hard_regno))
1509 last_hard_regno = hard_regno;
1510 count++;
1513 ira_class_singleton[cl][j] = (count == 1 ? last_hard_regno : -1);
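/* After the loops above, ira_class_singleton[CL][M] holds the sole
   allocatable hard register of class CL usable in mode M when exactly
   one such register exists, and -1 otherwise.  For example, on a
   target whose flags class contains a single register, callers can
   use the singleton to special-case that assignment cheaply.  */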
1518 /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
1519 spanning from one register pressure class to another. It is
1520 called after the pressure classes have been defined. */
1521 static void
1522 clarify_prohibited_class_mode_regs (void)
1524 int j, k, hard_regno, cl, pclass, nregs;
1526 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1527 for (j = 0; j < NUM_MACHINE_MODES; j++)
1529 CLEAR_HARD_REG_SET (ira_useful_class_mode_regs[cl][j]);
1530 for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1532 hard_regno = ira_class_hard_regs[cl][k];
1533 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno))
1534 continue;
1535 nregs = hard_regno_nregs[hard_regno][j];
1536 if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
1538 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1539 hard_regno);
1540 continue;
1542 pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
1543 for (nregs--; nregs >= 0; nregs--)
1544 if (((enum reg_class) pclass
1545 != ira_pressure_class_translate[REGNO_REG_CLASS
1546 (hard_regno + nregs)]))
1548 SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1549 hard_regno);
1550 break;
1552 if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1553 hard_regno))
1554 add_to_hard_reg_set (&ira_useful_class_mode_regs[cl][j],
1555 (machine_mode) j, hard_regno);
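/* To illustrate the check above with made-up registers: if a
   two-register value starting in hard register R occupies R and R+1,
   and R+1 belongs to a different pressure class than R, then R is
   marked prohibited for that mode in the class being processed,
   because the register pressure of such a value could not be
   attributed to a single pressure class.  */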
1560 /* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST
1561 and IRA_MAY_MOVE_OUT_COST for MODE. */
1562 void
1563 ira_init_register_move_cost (machine_mode mode)
1565 static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES];
1566 bool all_match = true;
1567 unsigned int cl1, cl2;
1569 ira_assert (ira_register_move_cost[mode] == NULL
1570 && ira_may_move_in_cost[mode] == NULL
1571 && ira_may_move_out_cost[mode] == NULL);
1572 ira_assert (have_regs_of_mode[mode]);
1573 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1574 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1576 int cost;
1577 if (!contains_reg_of_mode[cl1][mode]
1578 || !contains_reg_of_mode[cl2][mode])
1580 if ((ira_reg_class_max_nregs[cl1][mode]
1581 > ira_class_hard_regs_num[cl1])
1582 || (ira_reg_class_max_nregs[cl2][mode]
1583 > ira_class_hard_regs_num[cl2]))
1584 cost = 65535;
1585 else
1586 cost = (ira_memory_move_cost[mode][cl1][0]
1587 + ira_memory_move_cost[mode][cl2][1]) * 2;
1589 else
1591 cost = register_move_cost (mode, (enum reg_class) cl1,
1592 (enum reg_class) cl2);
1593 ira_assert (cost < 65535);
1595 all_match &= (last_move_cost[cl1][cl2] == cost);
1596 last_move_cost[cl1][cl2] = cost;
1598 if (all_match && last_mode_for_init_move_cost != -1)
1600 ira_register_move_cost[mode]
1601 = ira_register_move_cost[last_mode_for_init_move_cost];
1602 ira_may_move_in_cost[mode]
1603 = ira_may_move_in_cost[last_mode_for_init_move_cost];
1604 ira_may_move_out_cost[mode]
1605 = ira_may_move_out_cost[last_mode_for_init_move_cost];
1606 return;
1608 last_mode_for_init_move_cost = mode;
1609 ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1610 ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1611 ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1612 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1613 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1615 int cost;
1616 enum reg_class *p1, *p2;
1618 if (last_move_cost[cl1][cl2] == 65535)
1620 ira_register_move_cost[mode][cl1][cl2] = 65535;
1621 ira_may_move_in_cost[mode][cl1][cl2] = 65535;
1622 ira_may_move_out_cost[mode][cl1][cl2] = 65535;
1624 else
1626 cost = last_move_cost[cl1][cl2];
1628 for (p2 = &reg_class_subclasses[cl2][0];
1629 *p2 != LIM_REG_CLASSES; p2++)
1630 if (ira_class_hard_regs_num[*p2] > 0
1631 && (ira_reg_class_max_nregs[*p2][mode]
1632 <= ira_class_hard_regs_num[*p2]))
1633 cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]);
1635 for (p1 = &reg_class_subclasses[cl1][0];
1636 *p1 != LIM_REG_CLASSES; p1++)
1637 if (ira_class_hard_regs_num[*p1] > 0
1638 && (ira_reg_class_max_nregs[*p1][mode]
1639 <= ira_class_hard_regs_num[*p1]))
1640 cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]);
1642 ira_assert (cost <= 65535);
1643 ira_register_move_cost[mode][cl1][cl2] = cost;
1645 if (ira_class_subset_p[cl1][cl2])
1646 ira_may_move_in_cost[mode][cl1][cl2] = 0;
1647 else
1648 ira_may_move_in_cost[mode][cl1][cl2] = cost;
1650 if (ira_class_subset_p[cl2][cl1])
1651 ira_may_move_out_cost[mode][cl1][cl2] = 0;
1652 else
1653 ira_may_move_out_cost[mode][cl1][cl2] = cost;
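/* Two conventions used above are worth noting: 65535 acts as an
   "infinite" move cost sentinel, and when the cost table computed for
   MODE matches the one computed for a previous mode, the three tables
   are shared rather than reallocated (which is why
   free_register_move_costs below takes care to free each shared table
   only once).  A consumer typically just reads

       cost = ira_register_move_cost[mode][cl1][cl2];

   once the tables for MODE have been initialized.  */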
1660 /* This is called once per compiler invocation. It sets up
1661 various arrays whose values do not depend on the function
1662 being compiled. */
1663 void
1664 ira_init_once (void)
1666 ira_init_costs_once ();
1667 lra_init_once ();
1670 /* Free ira_register_move_cost, ira_may_move_in_cost and
1671 ira_may_move_out_cost for each mode. */
1672 void
1673 target_ira_int::free_register_move_costs (void)
1675 int mode, i;
1677 /* Reset move_cost and friends, making sure we only free shared
1678 table entries once. */
1679 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1680 if (x_ira_register_move_cost[mode])
1682 for (i = 0;
1683 i < mode && (x_ira_register_move_cost[i]
1684 != x_ira_register_move_cost[mode]);
1685 i++)
1687 if (i == mode)
1689 free (x_ira_register_move_cost[mode]);
1690 free (x_ira_may_move_in_cost[mode]);
1691 free (x_ira_may_move_out_cost[mode]);
1694 memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost);
1695 memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost);
1696 memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost);
1697 last_mode_for_init_move_cost = -1;
1700 target_ira_int::~target_ira_int ()
1702 free_ira_costs ();
1703 free_register_move_costs ();
1706 /* This is called every time the register-related information
1707 changes. */
1708 void
1709 ira_init (void)
1711 this_target_ira_int->free_register_move_costs ();
1712 setup_reg_mode_hard_regset ();
1713 setup_alloc_regs (flag_omit_frame_pointer != 0);
1714 setup_class_subset_and_memory_move_costs ();
1715 setup_reg_class_nregs ();
1716 setup_prohibited_class_mode_regs ();
1717 find_reg_classes ();
1718 clarify_prohibited_class_mode_regs ();
1719 setup_hard_regno_aclass ();
1720 ira_init_costs ();
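/* The call order above matters: clarify_prohibited_class_mode_regs
   must follow find_reg_classes because it relies on
   ira_pressure_class_translate, and setup_hard_regno_aclass likewise
   needs ira_allocno_class_translate to be ready.  */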
1724 #define ira_prohibited_mode_move_regs_initialized_p \
1725 (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)
1727 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS. */
1728 static void
1729 setup_prohibited_mode_move_regs (void)
1731 int i, j;
1732 rtx test_reg1, test_reg2, move_pat;
1733 rtx_insn *move_insn;
1735 if (ira_prohibited_mode_move_regs_initialized_p)
1736 return;
1737 ira_prohibited_mode_move_regs_initialized_p = true;
1738 test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
1739 test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
1740 move_pat = gen_rtx_SET (test_reg1, test_reg2);
1741 move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0);
1742 for (i = 0; i < NUM_MACHINE_MODES; i++)
1744 SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1745 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1747 if (! HARD_REGNO_MODE_OK (j, (machine_mode) i))
1748 continue;
1749 set_mode_and_regno (test_reg1, (machine_mode) i, j);
1750 set_mode_and_regno (test_reg2, (machine_mode) i, j);
1751 INSN_CODE (move_insn) = -1;
1752 recog_memoized (move_insn);
1753 if (INSN_CODE (move_insn) < 0)
1754 continue;
1755 extract_insn (move_insn);
1756 /* We don't know whether the move will be in code that is optimized
1757 for size or speed, so consider all enabled alternatives. */
1758 if (! constrain_operands (1, get_enabled_alternatives (move_insn)))
1759 continue;
1760 CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
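/* The idea of the function above: start with every register
   prohibited for every mode, synthesize a dummy move
   (reg:M N) <- (reg:M N), ask recog whether some enabled alternative
   accepts it, and clear the prohibition only where it does.  */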
1767 /* Set up the possible alternatives in ALTS for INSN. */
1768 void
1769 ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts)
1771 /* MAP nalt * nop -> start of constraints for given operand and
1772 alternative. */
1773 static vec<const char *> insn_constraints;
1774 int nop, nalt;
1775 bool curr_swapped;
1776 const char *p;
1777 int commutative = -1;
1779 extract_insn (insn);
1780 alternative_mask preferred = get_preferred_alternatives (insn);
1781 CLEAR_HARD_REG_SET (alts);
1782 insn_constraints.release ();
1783 insn_constraints.safe_grow_cleared (recog_data.n_operands
1784 * recog_data.n_alternatives + 1);
1785 /* Check that the hard reg set is big enough to hold all the
1786 alternatives. It is hard to imagine a situation in which this
1787 assertion would fail. */
1788 ira_assert (recog_data.n_alternatives
1789 <= (int) MAX (sizeof (HARD_REG_ELT_TYPE) * CHAR_BIT,
1790 FIRST_PSEUDO_REGISTER));
1791 for (curr_swapped = false;; curr_swapped = true)
1793 /* Calculate some data common to all alternatives to speed up the
1794 function. */
1795 for (nop = 0; nop < recog_data.n_operands; nop++)
1797 for (nalt = 0, p = recog_data.constraints[nop];
1798 nalt < recog_data.n_alternatives;
1799 nalt++)
1801 insn_constraints[nop * recog_data.n_alternatives + nalt] = p;
1802 while (*p && *p != ',')
1804 /* We support only one commutative marker, the first one:
1805 once COMMUTATIVE has been set, later '%' markers are ignored. */
1806 if (*p == '%' && commutative < 0)
1807 commutative = nop;
1808 p++;
1810 if (*p)
1811 p++;
1814 for (nalt = 0; nalt < recog_data.n_alternatives; nalt++)
1816 if (!TEST_BIT (preferred, nalt)
1817 || TEST_HARD_REG_BIT (alts, nalt))
1818 continue;
1820 for (nop = 0; nop < recog_data.n_operands; nop++)
1822 int c, len;
1824 rtx op = recog_data.operand[nop];
1825 p = insn_constraints[nop * recog_data.n_alternatives + nalt];
1826 if (*p == 0 || *p == ',')
1827 continue;
1830 switch (c = *p, len = CONSTRAINT_LEN (c, p), c)
1832 case '#':
1833 case ',':
1834 c = '\0';
1835 case '\0':
1836 len = 0;
1837 break;
1839 case '%':
1840 /* The commutative modifier is handled above. */
1841 break;
1843 case '0': case '1': case '2': case '3': case '4':
1844 case '5': case '6': case '7': case '8': case '9':
1845 goto op_success;
1846 break;
1848 case 'g':
1849 goto op_success;
1850 break;
1852 default:
1854 enum constraint_num cn = lookup_constraint (p);
1855 switch (get_constraint_type (cn))
1857 case CT_REGISTER:
1858 if (reg_class_for_constraint (cn) != NO_REGS)
1859 goto op_success;
1860 break;
1862 case CT_CONST_INT:
1863 if (CONST_INT_P (op)
1864 && (insn_const_int_ok_for_constraint
1865 (INTVAL (op), cn)))
1866 goto op_success;
1867 break;
1869 case CT_ADDRESS:
1870 case CT_MEMORY:
1871 case CT_SPECIAL_MEMORY:
1872 goto op_success;
1874 case CT_FIXED_FORM:
1875 if (constraint_satisfied_p (op, cn))
1876 goto op_success;
1877 break;
1879 break;
1882 while (p += len, c);
1883 break;
1884 op_success:
1887 if (nop >= recog_data.n_operands)
1888 SET_HARD_REG_BIT (alts, nalt);
1890 if (commutative < 0)
1891 break;
1892 /* Swap back and forth so that recog_data is left unchanged. */
1893 std::swap (recog_data.operand[commutative],
1894 recog_data.operand[commutative + 1]);
1895 if (curr_swapped)
1896 break;
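/* For instance, for an insn whose operand 0 has the constraint
   string "=r,m" and whose operand 1 has "m,r" (two alternatives),
   the loops above record in ALTS each alternative for which every
   operand can be satisfied, trying the operands both in their
   original order and, when a commutative pair is present, swapped.  */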
1900 /* Return the number of the output non-early-clobber operand which
1901 must in any case be the same as the operand with number OP_NUM
1902 (or a negative value if there is no such operand). Only
1903 alternatives that are really possible are taken into account. */
1904 int
1905 ira_get_dup_out_num (int op_num, HARD_REG_SET &alts)
1907 int curr_alt, c, original, dup;
1908 bool ignore_p, use_commut_op_p;
1909 const char *str;
1911 if (op_num < 0 || recog_data.n_alternatives == 0)
1912 return -1;
1913 /* We should find duplications only for input operands. */
1914 if (recog_data.operand_type[op_num] != OP_IN)
1915 return -1;
1916 str = recog_data.constraints[op_num];
1917 use_commut_op_p = false;
1918 for (;;)
1920 rtx op = recog_data.operand[op_num];
1922 for (curr_alt = 0, ignore_p = !TEST_HARD_REG_BIT (alts, curr_alt),
1923 original = -1;;)
1925 c = *str;
1926 if (c == '\0')
1927 break;
1928 if (c == '#')
1929 ignore_p = true;
1930 else if (c == ',')
1932 curr_alt++;
1933 ignore_p = !TEST_HARD_REG_BIT (alts, curr_alt);
1935 else if (! ignore_p)
1936 switch (c)
1938 case 'g':
1939 goto fail;
1940 default:
1942 enum constraint_num cn = lookup_constraint (str);
1943 enum reg_class cl = reg_class_for_constraint (cn);
1944 if (cl != NO_REGS
1945 && !targetm.class_likely_spilled_p (cl))
1946 goto fail;
1947 if (constraint_satisfied_p (op, cn))
1948 goto fail;
1949 break;
1952 case '0': case '1': case '2': case '3': case '4':
1953 case '5': case '6': case '7': case '8': case '9':
1954 if (original != -1 && original != c)
1955 goto fail;
1956 original = c;
1957 break;
1959 str += CONSTRAINT_LEN (c, str);
1961 if (original == -1)
1962 goto fail;
1963 dup = -1;
1964 for (ignore_p = false, str = recog_data.constraints[original - '0'];
1965 *str != 0;
1966 str++)
1967 if (ignore_p)
1969 if (*str == ',')
1970 ignore_p = false;
1972 else if (*str == '#')
1973 ignore_p = true;
1974 else if (! ignore_p)
1976 if (*str == '=')
1977 dup = original - '0';
1978 /* It is better to ignore an alternative with an early clobber. */
1979 else if (*str == '&')
1980 goto fail;
1982 if (dup >= 0)
1983 return dup;
1984 fail:
1985 if (use_commut_op_p)
1986 break;
1987 use_commut_op_p = true;
1988 if (recog_data.constraints[op_num][0] == '%')
1989 str = recog_data.constraints[op_num + 1];
1990 else if (op_num > 0 && recog_data.constraints[op_num - 1][0] == '%')
1991 str = recog_data.constraints[op_num - 1];
1992 else
1993 break;
1995 return -1;
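/* As an example of the matching handled above: for an insn whose
   operand 0 has the constraint "=r" and whose operand 1 has "0",
   input operand 1 must use the same register as output operand 0,
   so ira_get_dup_out_num (1, alts) would return 0, provided no
   early clobber '&' appears in the output constraint.  */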
2000 /* Search forward to see if the source register of a copy insn dies
2001 before either it or the destination register is modified, but don't
2002 scan past the end of the basic block. If so, we can replace the
2003 source with the destination and let the source die in the copy
2004 insn.
2006 This will reduce the number of registers live in that range and may
2007 enable coalescing of the destination and the source, thus often
2008 saving one register in addition to the register-register copy. */
2010 static void
2011 decrease_live_ranges_number (void)
2013 basic_block bb;
2014 rtx_insn *insn;
2015 rtx set, src, dest, dest_death, note;
2016 rtx_insn *p, *q;
2017 int sregno, dregno;
2019 if (! flag_expensive_optimizations)
2020 return;
2022 if (ira_dump_file)
2023 fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
2025 FOR_EACH_BB_FN (bb, cfun)
2026 FOR_BB_INSNS (bb, insn)
2028 set = single_set (insn);
2029 if (! set)
2030 continue;
2031 src = SET_SRC (set);
2032 dest = SET_DEST (set);
2033 if (! REG_P (src) || ! REG_P (dest)
2034 || find_reg_note (insn, REG_DEAD, src))
2035 continue;
2036 sregno = REGNO (src);
2037 dregno = REGNO (dest);
2039 /* We don't want to mess with hard regs if register classes
2040 are small. */
2041 if (sregno == dregno
2042 || (targetm.small_register_classes_for_mode_p (GET_MODE (src))
2043 && (sregno < FIRST_PSEUDO_REGISTER
2044 || dregno < FIRST_PSEUDO_REGISTER))
2045 /* We don't see all updates to SP if they are in an
2046 auto-inc memory reference, so we must disallow this
2047 optimization on them. */
2048 || sregno == STACK_POINTER_REGNUM
2049 || dregno == STACK_POINTER_REGNUM)
2050 continue;
2052 dest_death = NULL_RTX;
2054 for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
2056 if (! INSN_P (p))
2057 continue;
2058 if (BLOCK_FOR_INSN (p) != bb)
2059 break;
2061 if (reg_set_p (src, p) || reg_set_p (dest, p)
2062 /* If SRC is an asm-declared register, it must not be
2063 replaced in any asm. Unfortunately, the REG_EXPR
2064 tree for the asm variable may be absent in the SRC
2065 rtx, so we can't check the actual register
2066 declaration easily (the asm operand will have it,
2067 though). To avoid complicating the test for a rare
2068 case, we just don't perform register replacement
2069 for a hard reg mentioned in an asm. */
2070 || (sregno < FIRST_PSEUDO_REGISTER
2071 && asm_noperands (PATTERN (p)) >= 0
2072 && reg_overlap_mentioned_p (src, PATTERN (p)))
2073 /* Don't change hard registers used by a call. */
2074 || (CALL_P (p) && sregno < FIRST_PSEUDO_REGISTER
2075 && find_reg_fusage (p, USE, src))
2076 /* Don't change a USE of a register. */
2077 || (GET_CODE (PATTERN (p)) == USE
2078 && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
2079 break;
2081 /* See if all of SRC dies in P. This test is slightly
2082 more conservative than it needs to be. */
2083 if ((note = find_regno_note (p, REG_DEAD, sregno))
2084 && GET_MODE (XEXP (note, 0)) == GET_MODE (src))
2086 int failed = 0;
2088 /* We can do the optimization. Scan forward from INSN
2089 again, replacing regs as we go. Set FAILED if a
2090 replacement can't be done. In that case, we can't
2091 move the death note for SRC. This should be
2092 rare. */
2094 /* Scan the real insns from just after INSN up to and including P. */
2095 for (q = next_real_insn (insn);
2096 q != next_real_insn (p);
2097 q = next_real_insn (q))
2099 if (reg_overlap_mentioned_p (src, PATTERN (q)))
2101 /* If SRC is a hard register, we might miss
2102 some overlapping registers with
2103 validate_replace_rtx, so we would have to
2104 undo it. We can't if DEST is present in
2105 the insn, so fail in that combination of
2106 cases. */
2107 if (sregno < FIRST_PSEUDO_REGISTER
2108 && reg_mentioned_p (dest, PATTERN (q)))
2109 failed = 1;
2111 /* Attempt to replace all uses. */
2112 else if (!validate_replace_rtx (src, dest, q))
2113 failed = 1;
2115 /* If this succeeded, but some part of the
2116 register is still present, undo the
2117 replacement. */
2118 else if (sregno < FIRST_PSEUDO_REGISTER
2119 && reg_overlap_mentioned_p (src, PATTERN (q)))
2121 validate_replace_rtx (dest, src, q);
2122 failed = 1;
2126 /* If DEST dies here, remove the death note and
2127 save it for later. Make sure ALL of DEST dies
2128 here; again, this is overly conservative. */
2129 if (! dest_death
2130 && (dest_death = find_regno_note (q, REG_DEAD, dregno)))
2132 if (GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest))
2133 remove_note (q, dest_death);
2134 else
2136 failed = 1;
2137 dest_death = 0;
2142 if (! failed)
2144 /* Move death note of SRC from P to INSN. */
2145 remove_note (p, note);
2146 XEXP (note, 1) = REG_NOTES (insn);
2147 REG_NOTES (insn) = note;
2150 /* DEST is also dead if INSN has a REG_UNUSED note for
2151 DEST. */
2152 if (! dest_death
2153 && (dest_death
2154 = find_regno_note (insn, REG_UNUSED, dregno)))
2156 PUT_REG_NOTE_KIND (dest_death, REG_DEAD);
2157 remove_note (insn, dest_death);
2160 /* Put death note of DEST on P if we saw it die. */
2161 if (dest_death)
2163 XEXP (dest_death, 1) = REG_NOTES (p);
2164 REG_NOTES (p) = dest_death;
2166 break;
2169 /* If SRC is a hard register which is set or killed in
2170 some other way, we can't do this optimization. */
2171 else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src))
2172 break;
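/* Schematically, the transformation above rewrites

       dest = src;  ... uses of src ...  (src dies)

   into

       dest = src;  ... uses of dest ...

   with the death note of SRC moved to the copy insn, so the live
   ranges of SRC and DEST no longer overlap and the copy becomes a
   candidate for coalescing.  */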
2179 /* Return nonzero if REGNO is a particularly bad choice for reloading X. */
2180 static bool
2181 ira_bad_reload_regno_1 (int regno, rtx x)
2183 int x_regno, n, i;
2184 ira_allocno_t a;
2185 enum reg_class pref;
2187 /* We only deal with pseudo regs. */
2188 if (! x || GET_CODE (x) != REG)
2189 return false;
2191 x_regno = REGNO (x);
2192 if (x_regno < FIRST_PSEUDO_REGISTER)
2193 return false;
2195 /* If the pseudo prefers REGNO explicitly, then do not consider
2196 REGNO a bad spill choice. */
2197 pref = reg_preferred_class (x_regno);
2198 if (reg_class_size[pref] == 1)
2199 return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);
2201 /* If the pseudo conflicts with REGNO, then we consider REGNO a
2202 poor choice for a reload regno. */
2203 a = ira_regno_allocno_map[x_regno];
2204 n = ALLOCNO_NUM_OBJECTS (a);
2205 for (i = 0; i < n; i++)
2207 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2208 if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
2209 return true;
2211 return false;
2214 /* Return nonzero if REGNO is a particularly bad choice for reloading
2215 IN or OUT. */
2216 bool
2217 ira_bad_reload_regno (int regno, rtx in, rtx out)
2219 return (ira_bad_reload_regno_1 (regno, in)
2220 || ira_bad_reload_regno_1 (regno, out));
2223 /* Add register clobbers from asm statements. */
2224 static void
2225 compute_regs_asm_clobbered (void)
2227 basic_block bb;
2229 FOR_EACH_BB_FN (bb, cfun)
2231 rtx_insn *insn;
2232 FOR_BB_INSNS_REVERSE (bb, insn)
2234 df_ref def;
2236 if (NONDEBUG_INSN_P (insn) && extract_asm_operands (PATTERN (insn)))
2237 FOR_EACH_INSN_DEF (def, insn)
2239 unsigned int dregno = DF_REF_REGNO (def);
2240 if (HARD_REGISTER_NUM_P (dregno))
2241 add_to_hard_reg_set (&crtl->asm_clobbers,
2242 GET_MODE (DF_REF_REAL_REG (def)),
2243 dregno);
2250 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and
2251 REGS_EVER_LIVE. */
2252 void
2253 ira_setup_eliminable_regset (void)
2255 #ifdef ELIMINABLE_REGS
2256 int i;
2257 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
2258 #endif
2259 /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
2260 sp for alloca. So we can't eliminate the frame pointer in that
2261 case. At some point, we should improve this by emitting the
2262 sp-adjusting insns for this case. */
2263 frame_pointer_needed
2264 = (! flag_omit_frame_pointer
2265 || (cfun->calls_alloca && EXIT_IGNORE_STACK)
2266 /* We need the frame pointer to catch stack overflow exceptions if
2267 the stack pointer is moving (as for the alloca case just above). */
2268 || (STACK_CHECK_MOVING_SP
2269 && flag_stack_check
2270 && flag_exceptions
2271 && cfun->can_throw_non_call_exceptions)
2272 || crtl->accesses_prior_frames
2273 || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed)
2274 /* We need a frame pointer for all Cilk Plus functions that use
2275 Cilk keywords. */
2276 || (flag_cilkplus && cfun->is_cilk_function)
2277 || targetm.frame_pointer_required ());
2279 /* The chance that FRAME_POINTER_NEEDED will change after inspecting
2280 the RTL is very small. So if we decide to use the frame pointer
2281 for RA and the RTL later turns out to prevent this, LRA will spill
2282 the pseudos assigned to the frame pointer. */
2284 if (frame_pointer_needed)
2285 df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
2287 COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
2288 CLEAR_HARD_REG_SET (eliminable_regset);
2290 compute_regs_asm_clobbered ();
2292 /* Build the regset of all eliminable registers and show we can't
2293 use those that we already know won't be eliminated. */
2294 #ifdef ELIMINABLE_REGS
2295 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
2297 bool cannot_elim
2298 = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
2299 || (eliminables[i].to == STACK_POINTER_REGNUM && frame_pointer_needed));
2301 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
2303 SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
2305 if (cannot_elim)
2306 SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
2308 else if (cannot_elim)
2309 error ("%s cannot be used in asm here",
2310 reg_names[eliminables[i].from]);
2311 else
2312 df_set_regs_ever_live (eliminables[i].from, true);
2314 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
2316 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
2318 SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
2319 if (frame_pointer_needed)
2320 SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
2322 else if (frame_pointer_needed)
2323 error ("%s cannot be used in asm here",
2324 reg_names[HARD_FRAME_POINTER_REGNUM]);
2325 else
2326 df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
2329 #else
2330 if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
2332 SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
2333 if (frame_pointer_needed)
2334 SET_HARD_REG_BIT (ira_no_alloc_regs, FRAME_POINTER_REGNUM);
2336 else if (frame_pointer_needed)
2337 error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
2338 else
2339 df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
2340 #endif
2345 /* Vector of substitutions of register numbers,
2346 used to map pseudo regs into hardware regs.
2347 This is set up as a result of register allocation.
2348 Element N is the hard reg assigned to pseudo reg N,
2349 or is -1 if no hard reg was assigned.
2350 If N is a hard reg number, element N is N. */
2351 short *reg_renumber;
2353 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
2354 the allocation found by IRA. */
2355 static void
2356 setup_reg_renumber (void)
2358 int regno, hard_regno;
2359 ira_allocno_t a;
2360 ira_allocno_iterator ai;
2362 caller_save_needed = 0;
2363 FOR_EACH_ALLOCNO (a, ai)
2365 if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL)
2366 continue;
2367 /* There are no caps at this point. */
2368 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2369 if (! ALLOCNO_ASSIGNED_P (a))
2370 /* It can happen if A is not referenced but partially anticipated
2371 somewhere in a region. */
2372 ALLOCNO_ASSIGNED_P (a) = true;
2373 ira_free_allocno_updated_costs (a);
2374 hard_regno = ALLOCNO_HARD_REGNO (a);
2375 regno = ALLOCNO_REGNO (a);
2376 reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
2377 if (hard_regno >= 0)
2379 int i, nwords;
2380 enum reg_class pclass;
2381 ira_object_t obj;
2383 pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
2384 nwords = ALLOCNO_NUM_OBJECTS (a);
2385 for (i = 0; i < nwords; i++)
2387 obj = ALLOCNO_OBJECT (a, i);
2388 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2389 reg_class_contents[pclass]);
2391 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
2392 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
2393 call_used_reg_set))
2395 ira_assert (!optimize || flag_caller_saves
2396 || (ALLOCNO_CALLS_CROSSED_NUM (a)
2397 == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2398 || regno >= ira_reg_equiv_len
2399 || ira_equiv_no_lvalue_p (regno));
2400 caller_save_needed = 1;
2406 /* Set up allocno assignment flags for further allocation
2407 improvements. */
2408 static void
2409 setup_allocno_assignment_flags (void)
2411 int hard_regno;
2412 ira_allocno_t a;
2413 ira_allocno_iterator ai;
2415 FOR_EACH_ALLOCNO (a, ai)
2417 if (! ALLOCNO_ASSIGNED_P (a))
2418 /* It can happen if A is not referenced but partially anticipated
2419 somewhere in a region. */
2420 ira_free_allocno_updated_costs (a);
2421 hard_regno = ALLOCNO_HARD_REGNO (a);
2422 /* Don't assign hard registers to allocnos which are the
2423 destination of a store removed at the end of a loop. It makes
2424 no sense to keep the same value in different hard registers.
2425 It is also impossible to assign hard registers correctly to
2426 such allocnos because the cost info and the info about
2427 intersecting calls are incorrect for them. */
2428 ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
2429 || ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
2430 || (ALLOCNO_MEMORY_COST (a)
2431 - ALLOCNO_CLASS_COST (a)) < 0);
2432 ira_assert
2433 (hard_regno < 0
2434 || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
2435 reg_class_contents[ALLOCNO_CLASS (a)]));
2439 /* Evaluate overall allocation cost and the costs for using hard
2440 registers and memory for allocnos. */
2441 static void
2442 calculate_allocation_cost (void)
2444 int hard_regno, cost;
2445 ira_allocno_t a;
2446 ira_allocno_iterator ai;
2448 ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
2449 FOR_EACH_ALLOCNO (a, ai)
2451 hard_regno = ALLOCNO_HARD_REGNO (a);
2452 ira_assert (hard_regno < 0
2453 || (ira_hard_reg_in_set_p
2454 (hard_regno, ALLOCNO_MODE (a),
2455 reg_class_contents[ALLOCNO_CLASS (a)])));
2456 if (hard_regno < 0)
2458 cost = ALLOCNO_MEMORY_COST (a);
2459 ira_mem_cost += cost;
2461 else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
2463 cost = (ALLOCNO_HARD_REG_COSTS (a)
2464 [ira_class_hard_reg_index
2465 [ALLOCNO_CLASS (a)][hard_regno]]);
2466 ira_reg_cost += cost;
2468 else
2470 cost = ALLOCNO_CLASS_COST (a);
2471 ira_reg_cost += cost;
2473 ira_overall_cost += cost;
2476 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2478 fprintf (ira_dump_file,
2479 "+++Costs: overall %" PRId64
2480 ", reg %" PRId64
2481 ", mem %" PRId64
2482 ", ld %" PRId64
2483 ", st %" PRId64
2484 ", move %" PRId64,
2485 ira_overall_cost, ira_reg_cost, ira_mem_cost,
2486 ira_load_cost, ira_store_cost, ira_shuffle_cost);
2487 fprintf (ira_dump_file, "\n+++ move loops %d, new jumps %d\n",
2488 ira_move_loops_num, ira_additional_jumps_num);
2493 #ifdef ENABLE_IRA_CHECKING
2494 /* Check the correctness of the allocation. We do need this because
2495 of the complicated code that transforms the multi-region internal
2496 representation into a one-region representation. */
2497 static void
2498 check_allocation (void)
2500 ira_allocno_t a;
2501 int hard_regno, nregs, conflict_nregs;
2502 ira_allocno_iterator ai;
2504 FOR_EACH_ALLOCNO (a, ai)
2506 int n = ALLOCNO_NUM_OBJECTS (a);
2507 int i;
2509 if (ALLOCNO_CAP_MEMBER (a) != NULL
2510 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
2511 continue;
2512 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
2513 if (nregs == 1)
2514 /* We allocated a single hard register. */
2515 n = 1;
2516 else if (n > 1)
2517 /* We allocated multiple hard registers, and we will test
2518 conflicts at the granularity of single hard regs. */
2519 nregs = 1;
2521 for (i = 0; i < n; i++)
2523 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2524 ira_object_t conflict_obj;
2525 ira_object_conflict_iterator oci;
2526 int this_regno = hard_regno;
2527 if (n > 1)
2529 if (REG_WORDS_BIG_ENDIAN)
2530 this_regno += n - i - 1;
2531 else
2532 this_regno += i;
2534 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2536 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2537 int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
2538 if (conflict_hard_regno < 0)
2539 continue;
2541 conflict_nregs
2542 = (hard_regno_nregs
2543 [conflict_hard_regno][ALLOCNO_MODE (conflict_a)]);
2545 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
2546 && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
2548 if (REG_WORDS_BIG_ENDIAN)
2549 conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
2550 - OBJECT_SUBWORD (conflict_obj) - 1);
2551 else
2552 conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
2553 conflict_nregs = 1;
2556 if ((conflict_hard_regno <= this_regno
2557 && this_regno < conflict_hard_regno + conflict_nregs)
2558 || (this_regno <= conflict_hard_regno
2559 && conflict_hard_regno < this_regno + nregs))
2561 fprintf (stderr, "bad allocation for %d and %d\n",
2562 ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
2563 gcc_unreachable ();
2569 #endif
2571 /* Allocate REG_EQUIV_INIT. Set it up from IRA_REG_EQUIV, which
2572 should already have been calculated. */
2573 static void
2574 setup_reg_equiv_init (void)
2576 int i;
2577 int max_regno = max_reg_num ();
2579 for (i = 0; i < max_regno; i++)
2580 reg_equiv_init (i) = ira_reg_equiv[i].init_insns;
2583 /* Update the equivalence info of TO_REGNO after a move from
2584 FROM_REGNO. INSNS are the insns which were generated for the
2585 move. It is assumed that FROM_REGNO and TO_REGNO always have the
2586 same value at the point of any move containing these registers.
2587 This function is used to update the equiv info for register
2588 shuffles on region borders and for caller save/restore insns. */
2589 void
2590 ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx_insn *insns)
2592 rtx_insn *insn;
2593 rtx x, note;
2595 if (! ira_reg_equiv[from_regno].defined_p
2596 && (! ira_reg_equiv[to_regno].defined_p
2597 || ((x = ira_reg_equiv[to_regno].memory) != NULL_RTX
2598 && ! MEM_READONLY_P (x))))
2599 return;
2600 insn = insns;
2601 if (NEXT_INSN (insn) != NULL_RTX)
2603 if (! ira_reg_equiv[to_regno].defined_p)
2605 ira_assert (ira_reg_equiv[to_regno].init_insns == NULL_RTX);
2606 return;
2608 ira_reg_equiv[to_regno].defined_p = false;
2609 ira_reg_equiv[to_regno].memory
2610 = ira_reg_equiv[to_regno].constant
2611 = ira_reg_equiv[to_regno].invariant
2612 = ira_reg_equiv[to_regno].init_insns = NULL;
2613 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2614 fprintf (ira_dump_file,
2615 " Invalidating equiv info for reg %d\n", to_regno);
2616 return;
2618 /* It is possible that FROM_REGNO still has no equivalence because,
2619 in shuffles to_regno<-from_regno and from_regno<-to_regno, the
2620 second insn has not been processed yet. */
2621 if (ira_reg_equiv[from_regno].defined_p)
2623 ira_reg_equiv[to_regno].defined_p = true;
2624 if ((x = ira_reg_equiv[from_regno].memory) != NULL_RTX)
2626 ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX
2627 && ira_reg_equiv[from_regno].constant == NULL_RTX);
2628 ira_assert (ira_reg_equiv[to_regno].memory == NULL_RTX
2629 || rtx_equal_p (ira_reg_equiv[to_regno].memory, x));
2630 ira_reg_equiv[to_regno].memory = x;
2631 if (! MEM_READONLY_P (x))
2632 /* We don't add the insn to the init insn list because a
2633 memory equivalence merely says which memory it is better
2634 to use when the pseudo is spilled. */
2635 return;
2637 else if ((x = ira_reg_equiv[from_regno].constant) != NULL_RTX)
2639 ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX);
2640 ira_assert (ira_reg_equiv[to_regno].constant == NULL_RTX
2641 || rtx_equal_p (ira_reg_equiv[to_regno].constant, x));
2642 ira_reg_equiv[to_regno].constant = x;
2644 else
2646 x = ira_reg_equiv[from_regno].invariant;
2647 ira_assert (x != NULL_RTX);
2648 ira_assert (ira_reg_equiv[to_regno].invariant == NULL_RTX
2649 || rtx_equal_p (ira_reg_equiv[to_regno].invariant, x));
2650 ira_reg_equiv[to_regno].invariant = x;
2652 if (find_reg_note (insn, REG_EQUIV, x) == NULL_RTX)
2654 note = set_unique_reg_note (insn, REG_EQUIV, x);
2655 gcc_assert (note != NULL_RTX);
2656 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2658 fprintf (ira_dump_file,
2659 " Adding equiv note to insn %u for reg %d ",
2660 INSN_UID (insn), to_regno);
2661 dump_value_slim (ira_dump_file, x, 1);
2662 fprintf (ira_dump_file, "\n");
2666 ira_reg_equiv[to_regno].init_insns
2667 = gen_rtx_INSN_LIST (VOIDmode, insn,
2668 ira_reg_equiv[to_regno].init_insns);
2669 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2670 fprintf (ira_dump_file,
2671 " Adding equiv init move insn %u to reg %d\n",
2672 INSN_UID (insn), to_regno);
2675 /* Fix the values of the array REG_EQUIV_INIT after the live-range
2676 splitting done by IRA. */
2677 static void
2678 fix_reg_equiv_init (void)
2680 int max_regno = max_reg_num ();
2681 int i, new_regno, max;
2682 rtx set;
2683 rtx_insn_list *x, *next, *prev;
2684 rtx_insn *insn;
2686 if (max_regno_before_ira < max_regno)
2688 max = vec_safe_length (reg_equivs);
2689 grow_reg_equivs ();
2690 for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
2691 for (prev = NULL, x = reg_equiv_init (i);
2692 x != NULL_RTX;
2693 x = next)
2695 next = x->next ();
2696 insn = x->insn ();
2697 set = single_set (insn);
2698 ira_assert (set != NULL_RTX
2699 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
2700 if (REG_P (SET_DEST (set))
2701 && ((int) REGNO (SET_DEST (set)) == i
2702 || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
2703 new_regno = REGNO (SET_DEST (set));
2704 else if (REG_P (SET_SRC (set))
2705 && ((int) REGNO (SET_SRC (set)) == i
2706 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
2707 new_regno = REGNO (SET_SRC (set));
2708 else
2709 gcc_unreachable ();
2710 if (new_regno == i)
2711 prev = x;
2712 else
2714 /* Remove the list element that no longer belongs to regno I. */
2715 if (prev == NULL_RTX)
2716 reg_equiv_init (i) = next;
2717 else
2718 XEXP (prev, 1) = next;
2719 XEXP (x, 1) = reg_equiv_init (new_regno);
2720 reg_equiv_init (new_regno) = x;
2726 #ifdef ENABLE_IRA_CHECKING
2727 /* Print redundant memory-memory copies. */
2728 static void
2729 print_redundant_copies (void)
2731 int hard_regno;
2732 ira_allocno_t a;
2733 ira_copy_t cp, next_cp;
2734 ira_allocno_iterator ai;
2736 FOR_EACH_ALLOCNO (a, ai)
2738 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2739 /* It is a cap. */
2740 continue;
2741 hard_regno = ALLOCNO_HARD_REGNO (a);
2742 if (hard_regno >= 0)
2743 continue;
2744 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2745 if (cp->first == a)
2746 next_cp = cp->next_first_allocno_copy;
2747 else
2749 next_cp = cp->next_second_allocno_copy;
2750 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
2751 && cp->insn != NULL_RTX
2752 && ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
2753 fprintf (ira_dump_file,
2754 " Redundant move from %d(freq %d):%d\n",
2755 INSN_UID (cp->insn), cp->freq, hard_regno);
2759 #endif
2761 /* Set up the preferred and alternative classes for the new
2762 pseudo-registers created by IRA, starting with START. */
2763 static void
2764 setup_preferred_alternate_classes_for_new_pseudos (int start)
2766 int i, old_regno;
2767 int max_regno = max_reg_num ();
2769 for (i = start; i < max_regno; i++)
2771 old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
2772 ira_assert (i != old_regno);
2773 setup_reg_classes (i, reg_preferred_class (old_regno),
2774 reg_alternate_class (old_regno),
2775 reg_allocno_class (old_regno));
2776 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2777 fprintf (ira_dump_file,
2778 " New r%d: setting preferred %s, alternative %s\n",
2779 i, reg_class_names[reg_preferred_class (old_regno)],
2780 reg_class_names[reg_alternate_class (old_regno)]);
2785 /* The number of entries allocated in reg_info. */
2786 static int allocated_reg_info_size;
2788 /* Regional allocation can create new pseudo-registers. This function
2789 expands some arrays for pseudo-registers. */
2790 static void
2791 expand_reg_info (void)
2793 int i;
2794 int size = max_reg_num ();
2796 resize_reg_info ();
2797 for (i = allocated_reg_info_size; i < size; i++)
2798 setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
2799 setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size);
2800 allocated_reg_info_size = size;
2803 /* Return TRUE if the register pressure in the function is too high.
2804 This is used to decide whether stack slot sharing is worth doing. */
2805 static bool
2806 too_high_register_pressure_p (void)
2808 int i;
2809 enum reg_class pclass;
2811 for (i = 0; i < ira_pressure_classes_num; i++)
2813 pclass = ira_pressure_classes[i];
2814 if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
2815 return true;
2817 return false;
2822 /* Indicate that hard register number FROM was eliminated and replaced with
2823 an offset from hard register number TO. The status of hard registers live
2824 at the start of a basic block is updated by replacing a use of FROM with
2825 a use of TO. */
2827 void
2828 mark_elimination (int from, int to)
2830 basic_block bb;
2831 bitmap r;
2833 FOR_EACH_BB_FN (bb, cfun)
2835 r = DF_LR_IN (bb);
2836 if (bitmap_bit_p (r, from))
2838 bitmap_clear_bit (r, from);
2839 bitmap_set_bit (r, to);
2841 if (! df_live)
2842 continue;
2843 r = DF_LIVE_IN (bb);
2844 if (bitmap_bit_p (r, from))
2846 bitmap_clear_bit (r, from);
2847 bitmap_set_bit (r, to);
2854 /* The length of the following array. */
2855 int ira_reg_equiv_len;
2857 /* Equivalence info for each register. */
2858 struct ira_reg_equiv_s *ira_reg_equiv;
2860 /* Expand ira_reg_equiv if necessary. */
2861 void
2862 ira_expand_reg_equiv (void)
2864 int old = ira_reg_equiv_len;
2866 if (ira_reg_equiv_len > max_reg_num ())
2867 return;
2868 ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1;
2869 ira_reg_equiv
2870 = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv,
2871 ira_reg_equiv_len
2872 * sizeof (struct ira_reg_equiv_s));
2873 gcc_assert (old < ira_reg_equiv_len);
2874 memset (ira_reg_equiv + old, 0,
2875 sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old));
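/* The growth policy above (max_reg_num () * 3 / 2 + 1) amortizes
   reallocations as new pseudos are created; the newly added entries
   are zeroed, which marks them as having no known equivalence.  */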
2878 static void
2879 init_reg_equiv (void)
2881 ira_reg_equiv_len = 0;
2882 ira_reg_equiv = NULL;
2883 ira_expand_reg_equiv ();
2886 static void
2887 finish_reg_equiv (void)
2889 free (ira_reg_equiv);
2894 struct equivalence
2896 /* Set when a REG_EQUIV note is found or created. Used to
2897 keep track of what memory accesses might be created later,
2898 e.g. by reload. */
2899 rtx replacement;
2900 rtx *src_p;
2902 /* The list of each instruction which initializes this register.
2904 NULL indicates we know nothing about this register's equivalence
2905 properties.
2907 An INSN_LIST with a NULL insn indicates this pseudo is already
2908 known to not have a valid equivalence. */
2909 rtx_insn_list *init_insns;
2911 /* Loop depth is used to recognize equivalences which appear
2912 to be present within the same loop (or in an inner loop). */
2913 short loop_depth;
2914 /* Nonzero if this had a preexisting REG_EQUIV note. */
2915 unsigned char is_arg_equivalence : 1;
2916 /* Set when an attempt should be made to replace a register
2917 with the associated src_p entry. */
2918 unsigned char replace : 1;
2919 /* Set if this register has no known equivalence. */
2920 unsigned char no_equiv : 1;
2921 /* Set if this register is mentioned in a paradoxical subreg. */
2922 unsigned char pdx_subregs : 1;
2925 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
2926 structure for that register. */
2927 static struct equivalence *reg_equiv;
2929 /* Used for communication between the following two functions. */
2930 struct equiv_mem_data
2932 /* A MEM that we wish to ensure remains unchanged. */
2933 rtx equiv_mem;
2935 /* Set true if EQUIV_MEM is modified. */
2936 bool equiv_mem_modified;
2939 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
2940 Called via note_stores. */
2941 static void
2942 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
2943 void *data)
2945 struct equiv_mem_data *info = (struct equiv_mem_data *) data;
2947 if ((REG_P (dest)
2948 && reg_overlap_mentioned_p (dest, info->equiv_mem))
2949 || (MEM_P (dest)
2950 && anti_dependence (info->equiv_mem, dest)))
2951 info->equiv_mem_modified = true;
2954 enum valid_equiv { valid_none, valid_combine, valid_reload };
2956 /* Verify that no store between START and the death of REG invalidates
2957 MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
2958 by storing into an overlapping memory location, or with a non-const
2959 CALL_INSN.
2961 Return VALID_RELOAD if MEMREF remains valid for both reload and
2962 combine_and_move_insns, VALID_COMBINE if it is valid only for
2963 combine_and_move_insns, and VALID_NONE otherwise. */
2964 static enum valid_equiv
2965 validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref)
2967 rtx_insn *insn;
2968 rtx note;
2969 struct equiv_mem_data info = { memref, false };
2970 enum valid_equiv ret = valid_reload;
2972 /* If the memory reference has side effects or is volatile, it isn't a
2973 valid equivalence. */
2974 if (side_effects_p (memref))
2975 return valid_none;
2977 for (insn = start; insn; insn = NEXT_INSN (insn))
2979 if (!INSN_P (insn))
2980 continue;
2982 if (find_reg_note (insn, REG_DEAD, reg))
2983 return ret;
2985 if (CALL_P (insn))
2987 /* We can combine a reg def from one insn into a reg use in
2988 another over a call if the memory is readonly or the call
2989 const/pure. However, we can't set reg_equiv notes up for
2990 reload over any call. The problem is the equivalent form
2991 may reference a pseudo which gets assigned a call
2992 clobbered hard reg. When we later replace REG with its
2993 equivalent form, the value in the call-clobbered reg has
2994 been changed and all hell breaks loose. */
2995 ret = valid_combine;
2996 if (!MEM_READONLY_P (memref)
2997 && !RTL_CONST_OR_PURE_CALL_P (insn))
2998 return valid_none;
3001 note_stores (PATTERN (insn), validate_equiv_mem_from_store, &info);
3002 if (info.equiv_mem_modified)
3003 return valid_none;
3005 /* If a register mentioned in MEMREF is modified via an
3006 auto-increment, we lose the equivalence. Do the same if one
3007 dies; although we could extend the life, it doesn't seem worth
3008 the trouble. */
3010 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3011 if ((REG_NOTE_KIND (note) == REG_INC
3012 || REG_NOTE_KIND (note) == REG_DEAD)
3013 && REG_P (XEXP (note, 0))
3014 && reg_overlap_mentioned_p (XEXP (note, 0), memref))
3015 return valid_none;
3018 return valid_none;
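/* In short: a MEMREF that survives to the death of REG untouched is
   valid_reload; one whose only hazard is crossing calls (const/pure
   calls, or any call when the memory is read-only) degrades to
   valid_combine; an intervening store to overlapping memory or to a
   register used by MEMREF, an auto-increment, or a death of such a
   register yields valid_none.  */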
3021 /* Return nonzero if X may vary; return zero if X is known to be invariant. */
3022 static int
3023 equiv_init_varies_p (rtx x)
3025 RTX_CODE code = GET_CODE (x);
3026 int i;
3027 const char *fmt;
3029 switch (code)
3031 case MEM:
3032 return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
3034 case CONST:
3035 CASE_CONST_ANY:
3036 case SYMBOL_REF:
3037 case LABEL_REF:
3038 return 0;
3040 case REG:
3041 return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
3043 case ASM_OPERANDS:
3044 if (MEM_VOLATILE_P (x))
3045 return 1;
3047 /* Fall through. */
3049 default:
3050 break;
3053 fmt = GET_RTX_FORMAT (code);
3054 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3055 if (fmt[i] == 'e')
3057 if (equiv_init_varies_p (XEXP (x, i)))
3058 return 1;
3060 else if (fmt[i] == 'E')
3062 int j;
3063 for (j = 0; j < XVECLEN (x, i); j++)
3064 if (equiv_init_varies_p (XVECEXP (x, i, j)))
3065 return 1;
3068 return 0;
3071 /* Return nonzero if X (used to initialize register REGNO) is movable.
3072 X is movable only if the registers it uses have equivalent initializations
3073 which appear to be within the same loop (or in an inner loop) and are
3074 themselves movable, or if they are not candidates for local_alloc and don't vary. */
3075 static int
3076 equiv_init_movable_p (rtx x, int regno)
3078 int i, j;
3079 const char *fmt;
3080 enum rtx_code code = GET_CODE (x);
3082 switch (code)
3084 case SET:
3085 return equiv_init_movable_p (SET_SRC (x), regno);
3087 case CC0:
3088 case CLOBBER:
3089 return 0;
3091 case PRE_INC:
3092 case PRE_DEC:
3093 case POST_INC:
3094 case POST_DEC:
3095 case PRE_MODIFY:
3096 case POST_MODIFY:
3097 return 0;
3099 case REG:
3100 return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
3101 && reg_equiv[REGNO (x)].replace)
3102 || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS
3103 && ! rtx_varies_p (x, 0)));
3105 case UNSPEC_VOLATILE:
3106 return 0;
3108 case ASM_OPERANDS:
3109 if (MEM_VOLATILE_P (x))
3110 return 0;
3112 /* Fall through. */
3114 default:
3115 break;
3118 fmt = GET_RTX_FORMAT (code);
3119 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3120 switch (fmt[i])
3122 case 'e':
3123 if (! equiv_init_movable_p (XEXP (x, i), regno))
3124 return 0;
3125 break;
3126 case 'E':
3127 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3128 if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
3129 return 0;
3130 break;
3133 return 1;
3136 /* TRUE if X references a memory location that would be affected by a store
3137 to MEMREF. */
3138 static int
3139 memref_referenced_p (rtx memref, rtx x)
3141 int i, j;
3142 const char *fmt;
3143 enum rtx_code code = GET_CODE (x);
3145 switch (code)
3147 case CONST:
3148 case LABEL_REF:
3149 case SYMBOL_REF:
3150 CASE_CONST_ANY:
3151 case PC:
3152 case CC0:
3153 case HIGH:
3154 case LO_SUM:
3155 return 0;
3157 case REG:
3158 return (reg_equiv[REGNO (x)].replacement
3159 && memref_referenced_p (memref,
3160 reg_equiv[REGNO (x)].replacement));
3162 case MEM:
3163 if (true_dependence (memref, VOIDmode, x))
3164 return 1;
3165 break;
3167 case SET:
3168 /* If we are setting a MEM, it doesn't count (its address does), but any
3169 other SET_DEST that has a MEM in it is referencing the MEM. */
3170 if (MEM_P (SET_DEST (x)))
3172 if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
3173 return 1;
3175 else if (memref_referenced_p (memref, SET_DEST (x)))
3176 return 1;
3178 return memref_referenced_p (memref, SET_SRC (x));
3180 default:
3181 break;
3184 fmt = GET_RTX_FORMAT (code);
3185 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3186 switch (fmt[i])
3188 case 'e':
3189 if (memref_referenced_p (memref, XEXP (x, i)))
3190 return 1;
3191 break;
3192 case 'E':
3193 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3194 if (memref_referenced_p (memref, XVECEXP (x, i, j)))
3195 return 1;
3196 break;
3199 return 0;
3202 /* TRUE if some insn in the range (START, END] references a memory location
3203 that would be affected by a store to MEMREF.
3205 Callers should not call this routine if START is after END in the
3206 RTL chain. */
3208 static int
3209 memref_used_between_p (rtx memref, rtx_insn *start, rtx_insn *end)
3211 rtx_insn *insn;
3213 for (insn = NEXT_INSN (start);
3214 insn && insn != NEXT_INSN (end);
3215 insn = NEXT_INSN (insn))
3217 if (!NONDEBUG_INSN_P (insn))
3218 continue;
3220 if (memref_referenced_p (memref, PATTERN (insn)))
3221 return 1;
3223 /* Nonconst functions may access memory. */
3224 if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
3225 return 1;
3228 gcc_assert (insn == NEXT_INSN (end));
3229 return 0;
3232 /* Mark REG as having no known equivalence.
3233 Some instructions might have been processed before and furnished
3234 with REG_EQUIV notes for this register; these notes will have to be
3235 removed.
3236 STORE is the piece of RTL that does the non-constant / conflicting
3237 assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
3238 but needs to be there because this function is called from note_stores. */
3239 static void
3240 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
3241 void *data ATTRIBUTE_UNUSED)
3243 int regno;
3244 rtx_insn_list *list;
3246 if (!REG_P (reg))
3247 return;
3248 regno = REGNO (reg);
3249 reg_equiv[regno].no_equiv = 1;
3250 list = reg_equiv[regno].init_insns;
3251 if (list && list->insn () == NULL)
3252 return;
3253 reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, NULL_RTX, NULL);
3254 reg_equiv[regno].replacement = NULL_RTX;
3255 /* This doesn't matter for equivalences made for argument registers;
3256 we should keep their initialization insns. */
3257 if (reg_equiv[regno].is_arg_equivalence)
3258 return;
3259 ira_reg_equiv[regno].defined_p = false;
3260 ira_reg_equiv[regno].init_insns = NULL;
3261 for (; list; list = list->next ())
3263 rtx_insn *insn = list->insn ();
3264 remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
3268 /* If INSN mentions a paradoxical SUBREG of a REG, set the pdx_subregs
3269 flag in that register's reg_equiv entry. */
3271 static void
3272 set_paradoxical_subreg (rtx_insn *insn)
3274 subrtx_iterator::array_type array;
3275 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3277 const_rtx subreg = *iter;
3278 if (GET_CODE (subreg) == SUBREG)
3280 const_rtx reg = SUBREG_REG (subreg);
3281 if (REG_P (reg) && paradoxical_subreg_p (subreg))
3282 reg_equiv[REGNO (reg)].pdx_subregs = true;
3287 /* In a DEBUG_INSN location, adjust the REGs in the CLEARED_REGS
3288 bitmap to their equivalent replacement. */
3290 static rtx
3291 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
3293 if (REG_P (loc))
3295 bitmap cleared_regs = (bitmap) data;
3296 if (bitmap_bit_p (cleared_regs, REGNO (loc)))
3297 return simplify_replace_fn_rtx (copy_rtx (*reg_equiv[REGNO (loc)].src_p),
3298 NULL_RTX, adjust_cleared_regs, data);
3300 return NULL_RTX;
3303 /* Find registers that are equivalent to a single value throughout the
3304 compilation (either because they can be referenced in memory or are
3305 set once from a single constant). Lower their priority for a
3306 register.
3308 If such a register is only referenced once, try substituting its
3309 value into the using insn. If it succeeds, we can eliminate the
3310 register completely.
3312 Initialize init_insns in ira_reg_equiv array. */
3313 static void
3314 update_equiv_regs (void)
3316 rtx_insn *insn;
3317 basic_block bb;
3318 int loop_depth;
3320 /* Scan the insns and set pdx_subregs if a reg is used in a
3321 paradoxical subreg. Don't make such a reg equivalent to a mem,
3322 because LRA will not substitute such an equiv memory, so as to
3323 prevent access beyond the allocated memory of a paradoxical subreg. */
3324 FOR_EACH_BB_FN (bb, cfun)
3325 FOR_BB_INSNS (bb, insn)
3326 if (NONDEBUG_INSN_P (insn))
3327 set_paradoxical_subreg (insn);
3329 /* Scan the insns and find which registers have equivalences. Do this
3330 in a separate scan of the insns because (due to -fcse-follow-jumps)
3331 a register can be set below its use. */
3332 FOR_EACH_BB_FN (bb, cfun)
3334 loop_depth = bb_loop_depth (bb);
3336 for (insn = BB_HEAD (bb);
3337 insn != NEXT_INSN (BB_END (bb));
3338 insn = NEXT_INSN (insn))
3340 rtx note;
3341 rtx set;
3342 rtx dest, src;
3343 int regno;
3345 if (! INSN_P (insn))
3346 continue;
3348 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3349 if (REG_NOTE_KIND (note) == REG_INC)
3350 no_equiv (XEXP (note, 0), note, NULL);
3352 set = single_set (insn);
3354 /* If this insn contains more (or fewer) than a single SET,
3355 just mark all destinations as having no known equivalence. */
3356 if (set == NULL_RTX
3357 || side_effects_p (SET_SRC (set)))
3359 note_stores (PATTERN (insn), no_equiv, NULL);
3360 continue;
3362 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3364 int i;
3366 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3368 rtx part = XVECEXP (PATTERN (insn), 0, i);
3369 if (part != set)
3370 note_stores (part, no_equiv, NULL);
3374 dest = SET_DEST (set);
3375 src = SET_SRC (set);
3377 /* See if this is setting up the equivalence between an argument
3378 register and its stack slot. */
3379 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3380 if (note)
3382 gcc_assert (REG_P (dest));
3383 regno = REGNO (dest);
3385 /* Note that we don't want to clear init_insns in
3386 ira_reg_equiv even if there are multiple sets of this
3387 register. */
3388 reg_equiv[regno].is_arg_equivalence = 1;
3390 /* The insn result can have an equivalent memory location
3391 even though the equivalence is not set up by the insn
3392 itself. For now we add this insn to the init insns as a
3393 flag that regno has an equivalence. We will remove the
3394 insn from the init insn list later. */
3395 if (rtx_equal_p (src, XEXP (note, 0)) || MEM_P (XEXP (note, 0)))
3396 ira_reg_equiv[regno].init_insns
3397 = gen_rtx_INSN_LIST (VOIDmode, insn,
3398 ira_reg_equiv[regno].init_insns);
3400 /* Continue normally in case this is a candidate for
3401 replacements. */
3404 if (!optimize)
3405 continue;
3407 /* We only handle the case of a pseudo register being set
3408 once, or always to the same value. */
3409 /* ??? The mn10200 port breaks if we add equivalences for
3410 values that need an ADDRESS_REGS register and set them equivalent
3411 to a MEM of a pseudo. The actual problem is in the over-conservative
3412 handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
3413 calculate_needs, but we traditionally work around this problem
3414 here by rejecting equivalences when the destination is in a register
3415 that's likely spilled. This is fragile, of course, since the
3416 preferred class of a pseudo depends on all instructions that set
3417 or use it. */
3419 if (!REG_P (dest)
3420 || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
3421 || (reg_equiv[regno].init_insns
3422 && reg_equiv[regno].init_insns->insn () == NULL)
3423 || (targetm.class_likely_spilled_p (reg_preferred_class (regno))
3424 && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
3426 /* This might be setting a SUBREG of a pseudo, a pseudo that is
3427 also set somewhere else to a constant. */
3428 note_stores (set, no_equiv, NULL);
3429 continue;
3432 /* Don't set reg mentioned in a paradoxical subreg
3433 equivalent to a mem. */
3434 if (MEM_P (src) && reg_equiv[regno].pdx_subregs)
3436 note_stores (set, no_equiv, NULL);
3437 continue;
3440 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
3442 /* cse sometimes generates function invariants, but doesn't put a
3443 REG_EQUAL note on the insn. Since this note would be redundant,
3444 there's no point creating it earlier than here. */
3445 if (! note && ! rtx_varies_p (src, 0))
3446 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3448 /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
3449 since it represents a function call. */
3450 if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
3451 note = NULL_RTX;
3453 if (DF_REG_DEF_COUNT (regno) != 1)
3455 bool equal_p = true;
3456 rtx_insn_list *list;
3458 /* If we have already processed this pseudo and determined that
3459 it cannot have an equivalence, then honor that decision. */
3460 if (reg_equiv[regno].no_equiv)
3461 continue;
3463 if (! note
3464 || rtx_varies_p (XEXP (note, 0), 0)
3465 || (reg_equiv[regno].replacement
3466 && ! rtx_equal_p (XEXP (note, 0),
3467 reg_equiv[regno].replacement)))
3469 no_equiv (dest, set, NULL);
3470 continue;
3473 list = reg_equiv[regno].init_insns;
3474 for (; list; list = list->next ())
3476 rtx note_tmp;
3477 rtx_insn *insn_tmp;
3479 insn_tmp = list->insn ();
3480 note_tmp = find_reg_note (insn_tmp, REG_EQUAL, NULL_RTX);
3481 gcc_assert (note_tmp);
3482 if (! rtx_equal_p (XEXP (note, 0), XEXP (note_tmp, 0)))
3484 equal_p = false;
3485 break;
3489 if (! equal_p)
3491 no_equiv (dest, set, NULL);
3492 continue;
3496 /* Record this insn as initializing this register. */
3497 reg_equiv[regno].init_insns
3498 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
3500 /* If this register is known to be equal to a constant, record that
3501 it is always equivalent to the constant. */
3502 if (DF_REG_DEF_COUNT (regno) == 1
3503 && note && ! rtx_varies_p (XEXP (note, 0), 0))
3505 rtx note_value = XEXP (note, 0);
3506 remove_note (insn, note);
3507 set_unique_reg_note (insn, REG_EQUIV, note_value);
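/* Hypothetical sketch of the promotion just performed: a single-def
   pseudo such as

     (insn 12 ... (set (reg 101) (reg 102))
        (expr_list:REG_EQUAL (const_int 42) (nil)))

   has its REG_EQUAL note replaced by REG_EQUIV, turning a claim that
   holds at this program point into one that holds function-wide.  */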
3510 /* If this insn introduces a "constant" register, decrease the priority
3511 of that register. Record this insn if the register is only used once
3512 more and the equivalence value is the same as our source.
3514 The latter condition is checked for two reasons: First, it is an
3515 indication that it may be more efficient to actually emit the insn
3516 as written (if no registers are available, reload will substitute
3517 the equivalence). Secondly, it avoids problems with any registers
3518 dying in this insn whose death notes would be missed.
3520 If we don't have a REG_EQUIV note, see if this insn is loading
3521 a register used only in one basic block from a MEM. If so, and the
3522 MEM remains unchanged for the life of the register, add a REG_EQUIV
3523 note. */
3524 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3526 rtx replacement = NULL_RTX;
3527 if (note)
3528 replacement = XEXP (note, 0);
3529 else if (REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3530 && MEM_P (SET_SRC (set)))
3532 enum valid_equiv validity;
3533 validity = validate_equiv_mem (insn, dest, SET_SRC (set));
3534 if (validity != valid_none)
3536 replacement = copy_rtx (SET_SRC (set));
3537 if (validity == valid_reload)
3538 note = set_unique_reg_note (insn, REG_EQUIV, replacement);
3542 /* If we haven't done so, record for reload that this is an
3543 equivalencing insn. */
3544 if (note && !reg_equiv[regno].is_arg_equivalence)
3545 ira_reg_equiv[regno].init_insns
3546 = gen_rtx_INSN_LIST (VOIDmode, insn,
3547 ira_reg_equiv[regno].init_insns);
3549 if (replacement)
3551 reg_equiv[regno].replacement = replacement;
3552 reg_equiv[regno].src_p = &SET_SRC (set);
3553 reg_equiv[regno].loop_depth = (short) loop_depth;
3555 /* Don't mess with things live during setjmp. */
3556 if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
3558 /* Note that the statement below does not affect the priority
3559 in local-alloc! */
3560 REG_LIVE_LENGTH (regno) *= 2;
3562 /* If the register is referenced exactly twice, meaning it is
3563 set once and used once, indicate that the reference may be
3564 replaced by the equivalence we computed above. Do this
3565 even if the register is only used in one block so that
3566 dependencies can be handled where the last register is
3567 used in a different block (i.e. HIGH / LO_SUM sequences)
3568 and to reduce the number of registers alive across
3569 calls. */
3571 if (REG_N_REFS (regno) == 2
3572 && (rtx_equal_p (replacement, src)
3573 || ! equiv_init_varies_p (src))
3574 && NONJUMP_INSN_P (insn)
3575 && equiv_init_movable_p (PATTERN (insn), regno))
3576 reg_equiv[regno].replace = 1;
3583 /* For insns that set a MEM to the contents of a REG that is only used
3584 in a single basic block, see if the register is always equivalent
3585 to that memory location and if moving the store from INSN to the
3586 insn that sets REG is safe. If so, put a REG_EQUIV note on the
3587 initializing insn. */
3588 static void
3589 add_store_equivs (void)
3591 bitmap_head seen_insns;
3593 bitmap_initialize (&seen_insns, NULL);
3594 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
3596 rtx set, src, dest;
3597 unsigned regno;
3598 rtx_insn *init_insn;
3600 bitmap_set_bit (&seen_insns, INSN_UID (insn));
3602 if (! INSN_P (insn))
3603 continue;
3605 set = single_set (insn);
3606 if (! set)
3607 continue;
3609 dest = SET_DEST (set);
3610 src = SET_SRC (set);
3612 /* Don't add a REG_EQUIV note if the insn already has one. The existing
3613 REG_EQUIV is likely more useful than the one we are adding. */
3614 if (MEM_P (dest) && REG_P (src)
3615 && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
3616 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3617 && DF_REG_DEF_COUNT (regno) == 1
3618 && ! reg_equiv[regno].pdx_subregs
3619 && reg_equiv[regno].init_insns != NULL
3620 && (init_insn = reg_equiv[regno].init_insns->insn ()) != 0
3621 && bitmap_bit_p (&seen_insns, INSN_UID (init_insn))
3622 && ! find_reg_note (init_insn, REG_EQUIV, NULL_RTX)
3623 && validate_equiv_mem (init_insn, src, dest) == valid_reload
3624 && ! memref_used_between_p (dest, init_insn, insn)
3625 /* Attaching a REG_EQUIV note will fail if INIT_INSN has
3626 multiple sets. */
3627 && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
3629 /* This insn makes the equivalence, not the one initializing
3630 the register. */
3631 ira_reg_equiv[regno].init_insns
3632 = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
3633 df_notes_rescan (init_insn);
3634 if (dump_file)
3635 fprintf (dump_file,
3636 "Adding REG_EQUIV to insn %d for source of insn %d\n",
3637 INSN_UID (init_insn),
3638 INSN_UID (insn));
3641 bitmap_clear (&seen_insns);
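/* A hedged example of the pattern recognized above (all RTL is
   illustrative):

     (insn 20 ... (set (reg 105) (plus:SI (reg 103) (reg 104))))
     ...
     (insn 30 ... (set (mem/c:SI (plus:SI (reg/f:SI fp)
                                          (const_int -12)))
                       (reg 105)))

   If reg 105 has a single def, lives in one block, and the stack slot
   stays unchanged for its lifetime, insn 20 gets a REG_EQUIV note
   naming the slot, so the slot can serve as the pseudo's home if no
   hard register is assigned.  */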
3644 /* Scan all regs killed in an insn to see if any of them are registers
3645 that are used only that once. If so, see if we can replace the reference
3646 with the equivalent form. If we can, delete the initializing
3647 reference and this register will go away. If we can't replace the
3648 reference, and the initializing reference is within the same loop
3649 (or in an inner loop), then move the register initialization just
3650 before the use, so that they are in the same basic block. */
3651 static void
3652 combine_and_move_insns (void)
3654 bitmap cleared_regs = BITMAP_ALLOC (NULL);
3655 int max = max_reg_num ();
3657 for (int regno = FIRST_PSEUDO_REGISTER; regno < max; regno++)
3659 if (!reg_equiv[regno].replace)
3660 continue;
3662 rtx_insn *use_insn = 0;
3663 for (df_ref use = DF_REG_USE_CHAIN (regno);
3664 use;
3665 use = DF_REF_NEXT_REG (use))
3666 if (DF_REF_INSN_INFO (use))
3668 if (DEBUG_INSN_P (DF_REF_INSN (use)))
3669 continue;
3670 gcc_assert (!use_insn);
3671 use_insn = DF_REF_INSN (use);
3673 gcc_assert (use_insn);
3675 /* Don't substitute into jumps. indirect_jump_optimize does
3676 this for anything we are prepared to handle. */
3677 if (JUMP_P (use_insn))
3678 continue;
3680 df_ref def = DF_REG_DEF_CHAIN (regno);
3681 gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && DF_REF_INSN_INFO (def));
3682 rtx_insn *def_insn = DF_REF_INSN (def);
3684 /* We may not move instructions that can throw, since that
3685 changes basic block boundaries and we are not prepared to
3686 adjust the CFG to match. */
3687 if (can_throw_internal (def_insn))
3688 continue;
3690 basic_block use_bb = BLOCK_FOR_INSN (use_insn);
3691 basic_block def_bb = BLOCK_FOR_INSN (def_insn);
3692 if (bb_loop_depth (use_bb) > bb_loop_depth (def_bb))
3693 continue;
3695 if (asm_noperands (PATTERN (def_insn)) < 0
3696 && validate_replace_rtx (regno_reg_rtx[regno],
3697 *reg_equiv[regno].src_p, use_insn))
3699 rtx link;
3700 /* Append the REG_DEAD notes from def_insn. */
3701 for (rtx *p = &REG_NOTES (def_insn); (link = *p) != 0; )
3703 if (REG_NOTE_KIND (XEXP (link, 0)) == REG_DEAD)
3705 *p = XEXP (link, 1);
3706 XEXP (link, 1) = REG_NOTES (use_insn);
3707 REG_NOTES (use_insn) = link;
3709 else
3710 p = &XEXP (link, 1);
3713 remove_death (regno, use_insn);
3714 SET_REG_N_REFS (regno, 0);
3715 REG_FREQ (regno) = 0;
3716 delete_insn (def_insn);
3718 reg_equiv[regno].init_insns = NULL;
3719 ira_reg_equiv[regno].init_insns = NULL;
3720 bitmap_set_bit (cleared_regs, regno);
3723 /* Move the initialization of the register to just before
3724 USE_INSN. Update the flow information. */
3725 else if (prev_nondebug_insn (use_insn) != def_insn)
3727 rtx_insn *new_insn;
3729 new_insn = emit_insn_before (PATTERN (def_insn), use_insn);
3730 REG_NOTES (new_insn) = REG_NOTES (def_insn);
3731 REG_NOTES (def_insn) = 0;
3732 /* Rescan it to process the notes. */
3733 df_insn_rescan (new_insn);
3735 /* Make sure this insn is recognized before reload begins,
3736 otherwise eliminate_regs_in_insn will die. */
3737 INSN_CODE (new_insn) = INSN_CODE (def_insn);
3739 delete_insn (def_insn);
3741 XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
3743 REG_BASIC_BLOCK (regno) = use_bb->index;
3744 REG_N_CALLS_CROSSED (regno) = 0;
3745 REG_FREQ_CALLS_CROSSED (regno) = 0;
3746 REG_N_THROWING_CALLS_CROSSED (regno) = 0;
3747 REG_LIVE_LENGTH (regno) = 2;
3749 if (use_insn == BB_HEAD (use_bb))
3750 BB_HEAD (use_bb) = new_insn;
3752 ira_reg_equiv[regno].init_insns
3753 = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
3754 bitmap_set_bit (cleared_regs, regno);
3758 if (!bitmap_empty_p (cleared_regs))
3760 basic_block bb;
3762 FOR_EACH_BB_FN (bb, cfun)
3764 bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
3765 bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
3766 if (!df_live)
3767 continue;
3768 bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
3769 bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
3772 /* Last pass - adjust debug insns referencing cleared regs. */
3773 if (MAY_HAVE_DEBUG_INSNS)
3774 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
3775 if (DEBUG_INSN_P (insn))
3777 rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
3778 INSN_VAR_LOCATION_LOC (insn)
3779 = simplify_replace_fn_rtx (old_loc, NULL_RTX,
3780 adjust_cleared_regs,
3781 (void *) cleared_regs);
3782 if (old_loc != INSN_VAR_LOCATION_LOC (insn))
3783 df_insn_rescan (insn);
3787 BITMAP_FREE (cleared_regs);
3790 /* A pass over indirect jumps, converting simple cases to direct jumps.
3791 Combine does this optimization too, but only within a basic block. */
3792 static void
3793 indirect_jump_optimize (void)
3795 basic_block bb;
3796 bool rebuild_p = false;
3798 FOR_EACH_BB_REVERSE_FN (bb, cfun)
3800 rtx_insn *insn = BB_END (bb);
3801 if (!JUMP_P (insn)
3802 || find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
3803 continue;
3805 rtx x = pc_set (insn);
3806 if (!x || !REG_P (SET_SRC (x)))
3807 continue;
3809 int regno = REGNO (SET_SRC (x));
3810 if (DF_REG_DEF_COUNT (regno) == 1)
3812 df_ref def = DF_REG_DEF_CHAIN (regno);
3813 if (!DF_REF_IS_ARTIFICIAL (def))
3815 rtx_insn *def_insn = DF_REF_INSN (def);
3816 rtx lab = NULL_RTX;
3817 rtx set = single_set (def_insn);
3818 if (set && GET_CODE (SET_SRC (set)) == LABEL_REF)
3819 lab = SET_SRC (set);
3820 else
3822 rtx eqnote = find_reg_note (def_insn, REG_EQUAL, NULL_RTX);
3823 if (eqnote && GET_CODE (XEXP (eqnote, 0)) == LABEL_REF)
3824 lab = XEXP (eqnote, 0);
3826 if (lab && validate_replace_rtx (SET_SRC (x), lab, insn))
3827 rebuild_p = true;
3832 if (rebuild_p)
3834 timevar_push (TV_JUMP);
3835 rebuild_jump_labels (get_insns ());
3836 if (purge_all_dead_edges ())
3837 delete_unreachable_blocks ();
3838 timevar_pop (TV_JUMP);
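/* Illustrative transformation (register and label numbers made up):

     (insn 5 ... (set (reg 110) (label_ref L42)))   <- sole def
     ...
     (jump_insn 9 ... (set (pc) (reg 110)))         <- indirect jump

   becomes

     (jump_insn 9 ... (set (pc) (label_ref L42)))   <- direct jump

   after which jump labels are rebuilt and any dead edges purged.  */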
3842 /* Set up fields memory, constant, and invariant from init_insns in
3843 the structures of array ira_reg_equiv. */
3844 static void
3845 setup_reg_equiv (void)
3847 int i;
3848 rtx_insn_list *elem, *prev_elem, *next_elem;
3849 rtx_insn *insn;
3850 rtx set, x;
3852 for (i = FIRST_PSEUDO_REGISTER; i < ira_reg_equiv_len; i++)
3853 for (prev_elem = NULL, elem = ira_reg_equiv[i].init_insns;
3854 elem;
3855 prev_elem = elem, elem = next_elem)
3857 next_elem = elem->next ();
3858 insn = elem->insn ();
3859 set = single_set (insn);
3861 /* Init insns can set up equivalence when the reg is a destination or
3862 a source (in which case the destination is memory). */
3863 if (set != 0 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))))
3865 if ((x = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL)
3867 x = XEXP (x, 0);
3868 if (REG_P (SET_DEST (set))
3869 && REGNO (SET_DEST (set)) == (unsigned int) i
3870 && ! rtx_equal_p (SET_SRC (set), x) && MEM_P (x))
3872 /* This insn reports the equivalence but does
3873 not actually set it up. Remove it from the
3874 list. */
3875 if (prev_elem == NULL)
3876 ira_reg_equiv[i].init_insns = next_elem;
3877 else
3878 XEXP (prev_elem, 1) = next_elem;
3879 elem = prev_elem;
3882 else if (REG_P (SET_DEST (set))
3883 && REGNO (SET_DEST (set)) == (unsigned int) i)
3884 x = SET_SRC (set);
3885 else
3887 gcc_assert (REG_P (SET_SRC (set))
3888 && REGNO (SET_SRC (set)) == (unsigned int) i);
3889 x = SET_DEST (set);
3891 if (! function_invariant_p (x)
3892 || ! flag_pic
3893 /* A function invariant is often CONSTANT_P but may
3894 include a register. We promise to only pass
3895 CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P. */
3896 || (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x)))
3898 /* It can happen that a REG_EQUIV note contains a MEM
3899 that is not a legitimate memory operand. As later
3900 stages of reload assume that all addresses found in
3901 the lra_regno_equiv_* arrays were originally
3902 legitimate, we ignore such REG_EQUIV notes. */
3903 if (memory_operand (x, VOIDmode))
3905 ira_reg_equiv[i].defined_p = true;
3906 ira_reg_equiv[i].memory = x;
3907 continue;
3909 else if (function_invariant_p (x))
3911 machine_mode mode;
3913 mode = GET_MODE (SET_DEST (set));
3914 if (GET_CODE (x) == PLUS
3915 || x == frame_pointer_rtx || x == arg_pointer_rtx)
3916 /* This is PLUS of frame pointer and a constant,
3917 or fp, or argp. */
3918 ira_reg_equiv[i].invariant = x;
3919 else if (targetm.legitimate_constant_p (mode, x))
3920 ira_reg_equiv[i].constant = x;
3921 else
3923 ira_reg_equiv[i].memory = force_const_mem (mode, x);
3924 if (ira_reg_equiv[i].memory == NULL_RTX)
3926 ira_reg_equiv[i].defined_p = false;
3927 ira_reg_equiv[i].init_insns = NULL;
3928 break;
3931 ira_reg_equiv[i].defined_p = true;
3932 continue;
3936 ira_reg_equiv[i].defined_p = false;
3937 ira_reg_equiv[i].init_insns = NULL;
3938 break;
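/* A hedged summary of the classification above, with made-up
   equivalences:

     x                                     field set
     -----------------------------------   ----------------------------
     (mem:SI (symbol_ref ("g")))           ira_reg_equiv[i].memory
     (const_int 42)                        ira_reg_equiv[i].constant
     (plus:SI (reg:SI fp) (const_int 8))   ira_reg_equiv[i].invariant

   A constant the target cannot legitimize is forced into the constant
   pool and recorded as a memory equivalence instead.  */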
3944 /* Print chain C to FILE. */
3945 static void
3946 print_insn_chain (FILE *file, struct insn_chain *c)
3948 fprintf (file, "insn=%d, ", INSN_UID (c->insn));
3949 bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
3950 bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
3954 /* Print all reload_insn_chains to FILE. */
3955 static void
3956 print_insn_chains (FILE *file)
3958 struct insn_chain *c;
3959 for (c = reload_insn_chain; c ; c = c->next)
3960 print_insn_chain (file, c);
3963 /* Return true if pseudo REGNO should be added to set live_throughout
3964 or dead_or_set of the insn chains for reload consideration. */
3965 static bool
3966 pseudo_for_reload_consideration_p (int regno)
3968 /* Consider spilled pseudos too for IRA because they still have a
3969 chance to get hard-registers in the reload when IRA is used. */
3970 return (reg_renumber[regno] >= 0 || ira_conflicts_p);
3973 /* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for
3974 REG, sized to the number of bytes REG occupies, using INIT_VALUE
3975 to get the initialization. ALLOCNUM need not be the regno of REG. */
3976 static void
3977 init_live_subregs (bool init_value, sbitmap *live_subregs,
3978 bitmap live_subregs_used, int allocnum, rtx reg)
3980 unsigned int regno = REGNO (SUBREG_REG (reg));
3981 int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
3983 gcc_assert (size > 0);
3985 /* Been there, done that. */
3986 if (bitmap_bit_p (live_subregs_used, allocnum))
3987 return;
3989 /* Create a new one. */
3990 if (live_subregs[allocnum] == NULL)
3991 live_subregs[allocnum] = sbitmap_alloc (size);
3993 /* If the entire reg was live before blasting into subregs, we need
3994 to init all of the subregs to ones; otherwise init them to zero. */
3995 if (init_value)
3996 bitmap_ones (live_subregs[allocnum]);
3997 else
3998 bitmap_clear (live_subregs[allocnum]);
4000 bitmap_set_bit (live_subregs_used, allocnum);
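/* Hypothetical example: for a DImode pseudo on a 32-bit target the
   sbitmap has 8 bits, one per byte.  A def of (subreg:SI (reg:DI 120)
   4) then kills bytes 4..7 (the high word) while bytes 0..3 continue
   to track the low word independently.  */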
4003 /* Walk the insns of the current function and build reload_insn_chain,
4004 and record register life information. */
4005 static void
4006 build_insn_chain (void)
4008 unsigned int i;
4009 struct insn_chain **p = &reload_insn_chain;
4010 basic_block bb;
4011 struct insn_chain *c = NULL;
4012 struct insn_chain *next = NULL;
4013 bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
4014 bitmap elim_regset = BITMAP_ALLOC (NULL);
4015 /* live_subregs is a vector used to keep accurate information about
4016 which hardregs are live in multiword pseudos. live_subregs and
4017 live_subregs_used are indexed by pseudo number. The live_subregs
4018 entry for a particular pseudo is only used if the corresponding
4019 element is nonzero in live_subregs_used. The sbitmap size of
4020 live_subregs[allocno] is the number of bytes that the pseudo can
4021 occupy. */
4022 sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
4023 bitmap live_subregs_used = BITMAP_ALLOC (NULL);
4025 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4026 if (TEST_HARD_REG_BIT (eliminable_regset, i))
4027 bitmap_set_bit (elim_regset, i);
4028 FOR_EACH_BB_REVERSE_FN (bb, cfun)
4030 bitmap_iterator bi;
4031 rtx_insn *insn;
4033 CLEAR_REG_SET (live_relevant_regs);
4034 bitmap_clear (live_subregs_used);
4036 EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
4038 if (i >= FIRST_PSEUDO_REGISTER)
4039 break;
4040 bitmap_set_bit (live_relevant_regs, i);
4043 EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb),
4044 FIRST_PSEUDO_REGISTER, i, bi)
4046 if (pseudo_for_reload_consideration_p (i))
4047 bitmap_set_bit (live_relevant_regs, i);
4050 FOR_BB_INSNS_REVERSE (bb, insn)
4052 if (!NOTE_P (insn) && !BARRIER_P (insn))
4054 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4055 df_ref def, use;
4057 c = new_insn_chain ();
4058 c->next = next;
4059 next = c;
4060 *p = c;
4061 p = &c->prev;
4063 c->insn = insn;
4064 c->block = bb->index;
4066 if (NONDEBUG_INSN_P (insn))
4067 FOR_EACH_INSN_INFO_DEF (def, insn_info)
4069 unsigned int regno = DF_REF_REGNO (def);
4071 /* Ignore may-clobbers because these are generated
4072 from calls. However, every other kind of def is
4073 added to dead_or_set. */
4074 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
4076 if (regno < FIRST_PSEUDO_REGISTER)
4078 if (!fixed_regs[regno])
4079 bitmap_set_bit (&c->dead_or_set, regno);
4081 else if (pseudo_for_reload_consideration_p (regno))
4082 bitmap_set_bit (&c->dead_or_set, regno);
4085 if ((regno < FIRST_PSEUDO_REGISTER
4086 || reg_renumber[regno] >= 0
4087 || ira_conflicts_p)
4088 && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
4090 rtx reg = DF_REF_REG (def);
4092 /* We can model subregs, but not if they are
4093 wrapped in ZERO_EXTRACTS. */
4094 if (GET_CODE (reg) == SUBREG
4095 && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
4097 unsigned int start = SUBREG_BYTE (reg);
4098 unsigned int last = start
4099 + GET_MODE_SIZE (GET_MODE (reg));
4101 init_live_subregs
4102 (bitmap_bit_p (live_relevant_regs, regno),
4103 live_subregs, live_subregs_used, regno, reg);
4105 if (!DF_REF_FLAGS_IS_SET
4106 (def, DF_REF_STRICT_LOW_PART))
4108 /* Expand the range to cover entire words.
4109 Bytes added here are "don't care". */
4110 start
4111 = start / UNITS_PER_WORD * UNITS_PER_WORD;
4112 last = ((last + UNITS_PER_WORD - 1)
4113 / UNITS_PER_WORD * UNITS_PER_WORD);
4116 /* Ignore the paradoxical bits. */
4117 if (last > SBITMAP_SIZE (live_subregs[regno]))
4118 last = SBITMAP_SIZE (live_subregs[regno]);
4120 while (start < last)
4122 bitmap_clear_bit (live_subregs[regno], start);
4123 start++;
4126 if (bitmap_empty_p (live_subregs[regno]))
4128 bitmap_clear_bit (live_subregs_used, regno);
4129 bitmap_clear_bit (live_relevant_regs, regno);
4131 else
4132 /* Set live_relevant_regs here because
4133 that bit has to be true to get us to
4134 look at the live_subregs fields. */
4135 bitmap_set_bit (live_relevant_regs, regno);
4137 else
4139 /* DF_REF_PARTIAL is generated for
4140 subregs, STRICT_LOW_PART, and
4141 ZERO_EXTRACT. We handle the subreg
4142 case above so here we have to keep from
4143 modeling the def as a killing def. */
4144 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
4146 bitmap_clear_bit (live_subregs_used, regno);
4147 bitmap_clear_bit (live_relevant_regs, regno);
4153 bitmap_and_compl_into (live_relevant_regs, elim_regset);
4154 bitmap_copy (&c->live_throughout, live_relevant_regs);
4156 if (NONDEBUG_INSN_P (insn))
4157 FOR_EACH_INSN_INFO_USE (use, insn_info)
4159 unsigned int regno = DF_REF_REGNO (use);
4160 rtx reg = DF_REF_REG (use);
4162 /* DF_REF_READ_WRITE on a use means that this use
4163 is fabricated from a def that is a partial set
4164 to a multiword reg. Here, we only model the
4165 subreg case that is not wrapped in ZERO_EXTRACT
4166 precisely so we do not need to look at the
4167 fabricated use. */
4168 if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
4169 && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
4170 && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
4171 continue;
4173 /* Add the last use of each var to dead_or_set. */
4174 if (!bitmap_bit_p (live_relevant_regs, regno))
4176 if (regno < FIRST_PSEUDO_REGISTER)
4178 if (!fixed_regs[regno])
4179 bitmap_set_bit (&c->dead_or_set, regno);
4181 else if (pseudo_for_reload_consideration_p (regno))
4182 bitmap_set_bit (&c->dead_or_set, regno);
4185 if (regno < FIRST_PSEUDO_REGISTER
4186 || pseudo_for_reload_consideration_p (regno))
4188 if (GET_CODE (reg) == SUBREG
4189 && !DF_REF_FLAGS_IS_SET (use,
4190 DF_REF_SIGN_EXTRACT
4191 | DF_REF_ZERO_EXTRACT))
4193 unsigned int start = SUBREG_BYTE (reg);
4194 unsigned int last = start
4195 + GET_MODE_SIZE (GET_MODE (reg));
4197 init_live_subregs
4198 (bitmap_bit_p (live_relevant_regs, regno),
4199 live_subregs, live_subregs_used, regno, reg);
4201 /* Ignore the paradoxical bits. */
4202 if (last > SBITMAP_SIZE (live_subregs[regno]))
4203 last = SBITMAP_SIZE (live_subregs[regno]);
4205 while (start < last)
4207 bitmap_set_bit (live_subregs[regno], start);
4208 start++;
4211 else
4212 /* Resetting the live_subregs_used is
4213 effectively saying do not use the subregs
4214 because we are reading the whole
4215 pseudo. */
4216 bitmap_clear_bit (live_subregs_used, regno);
4217 bitmap_set_bit (live_relevant_regs, regno);
4223 /* FIXME!! The following code is a disaster. Reload needs to see the
4224 labels and jump tables that are just hanging out in between
4225 the basic blocks. See pr33676. */
4226 insn = BB_HEAD (bb);
4228 /* Skip over the barriers and cruft. */
4229 while (insn && (BARRIER_P (insn) || NOTE_P (insn)
4230 || BLOCK_FOR_INSN (insn) == bb))
4231 insn = PREV_INSN (insn);
4233 /* While we add anything except barriers and notes, the focus is
4234 to get the labels and jump tables into the
4235 reload_insn_chain. */
4236 while (insn)
4238 if (!NOTE_P (insn) && !BARRIER_P (insn))
4240 if (BLOCK_FOR_INSN (insn))
4241 break;
4243 c = new_insn_chain ();
4244 c->next = next;
4245 next = c;
4246 *p = c;
4247 p = &c->prev;
4249 /* The block makes no sense here, but it is what the old
4250 code did. */
4251 c->block = bb->index;
4252 c->insn = insn;
4253 bitmap_copy (&c->live_throughout, live_relevant_regs);
4255 insn = PREV_INSN (insn);
4259 reload_insn_chain = c;
4260 *p = NULL;
4262 for (i = 0; i < (unsigned int) max_regno; i++)
4263 if (live_subregs[i] != NULL)
4264 sbitmap_free (live_subregs[i]);
4265 free (live_subregs);
4266 BITMAP_FREE (live_subregs_used);
4267 BITMAP_FREE (live_relevant_regs);
4268 BITMAP_FREE (elim_regset);
4270 if (dump_file)
4271 print_insn_chains (dump_file);
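/* Roughly what the resulting dump conveys (numbers hypothetical):

     insn=42, live_throughout: ... dead_or_set: ...

   Each chain node records, for reload, which relevant registers are
   live across the insn and which are set or die in it.  */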
4274 /* Examine the rtx found in *LOC, which is read or written to as determined
4275 by TYPE. Return false if we find a reason why an insn containing this
4276 rtx should not be moved (such as accesses to non-constant memory), true
4277 otherwise. */
4278 static bool
4279 rtx_moveable_p (rtx *loc, enum op_type type)
4281 const char *fmt;
4282 rtx x = *loc;
4283 enum rtx_code code = GET_CODE (x);
4284 int i, j;
4287 switch (code)
4289 case CONST:
4290 CASE_CONST_ANY:
4291 case SYMBOL_REF:
4292 case LABEL_REF:
4293 return true;
4295 case PC:
4296 return type == OP_IN;
4298 case CC0:
4299 return false;
4301 case REG:
4302 if (x == frame_pointer_rtx)
4303 return true;
4304 if (HARD_REGISTER_P (x))
4305 return false;
4307 return true;
4309 case MEM:
4310 if (type == OP_IN && MEM_READONLY_P (x))
4311 return rtx_moveable_p (&XEXP (x, 0), OP_IN);
4312 return false;
4314 case SET:
4315 return (rtx_moveable_p (&SET_SRC (x), OP_IN)
4316 && rtx_moveable_p (&SET_DEST (x), OP_OUT));
4318 case STRICT_LOW_PART:
4319 return rtx_moveable_p (&XEXP (x, 0), OP_OUT);
4321 case ZERO_EXTRACT:
4322 case SIGN_EXTRACT:
4323 return (rtx_moveable_p (&XEXP (x, 0), type)
4324 && rtx_moveable_p (&XEXP (x, 1), OP_IN)
4325 && rtx_moveable_p (&XEXP (x, 2), OP_IN));
4327 case CLOBBER:
4328 return rtx_moveable_p (&SET_DEST (x), OP_OUT);
4330 case UNSPEC_VOLATILE:
4331 /* It is a bad idea to consider insns with such rtl
4332 as moveable ones. The insn scheduler also treats them as barriers
4333 for a reason. */
4334 return false;
4336 default:
4337 break;
4340 fmt = GET_RTX_FORMAT (code);
4341 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4343 if (fmt[i] == 'e')
4345 if (!rtx_moveable_p (&XEXP (x, i), type))
4346 return false;
4348 else if (fmt[i] == 'E')
4349 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4351 if (!rtx_moveable_p (&XVECEXP (x, i, j), type))
4352 return false;
4355 return true;
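/* For intuition (hypothetical): (set (reg 130) (plus:SI (reg 131)
   (const_int 1))) is moveable, whereas any rtx reading non-constant
   memory, touching a hard register other than the frame pointer, or
   containing UNSPEC_VOLATILE is not.  */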
4358 /* A wrapper around dominated_by_p, which uses the information in UID_LUID
4359 to give dominance relationships between two insns I1 and I2. */
4360 static bool
4361 insn_dominated_by_p (rtx i1, rtx i2, int *uid_luid)
4363 basic_block bb1 = BLOCK_FOR_INSN (i1);
4364 basic_block bb2 = BLOCK_FOR_INSN (i2);
4366 if (bb1 == bb2)
4367 return uid_luid[INSN_UID (i2)] < uid_luid[INSN_UID (i1)];
4368 return dominated_by_p (CDI_DOMINATORS, bb1, bb2);
4371 /* Record the range of register numbers added by find_moveable_pseudos. */
4372 int first_moveable_pseudo, last_moveable_pseudo;
4374 /* This vector holds data for every register added by
4375 find_moveable_pseudos, with index 0 holding data for the
4376 first_moveable_pseudo. */
4377 /* The original home register. */
4378 static vec<rtx> pseudo_replaced_reg;
4380 /* Look for instances where we have an instruction that is known to increase
4381 register pressure, and whose result is not used immediately. If it is
4382 possible to move the instruction downwards to just before its first use,
4383 split its lifetime into two ranges. We create a new pseudo to compute the
4384 value, and emit a move instruction just before the first use. If, after
4385 register allocation, the new pseudo remains unallocated, the function
4386 move_unallocated_pseudos then deletes the move instruction and places
4387 the computation just before the first use.
4389 Such a move is safe and profitable if all the input registers remain live
4390 and unchanged between the original computation and its first use. In such
4391 a situation, the computation is known to increase register pressure, and
4392 moving it is known to at least not worsen it.
4394 We restrict moves to only those cases where a register remains unallocated,
4395 in order to avoid interfering too much with the instruction schedule. As
4396 an exception, we may move insns which only modify their input register
4397 (typically induction variables), as this increases the freedom for our
4398 intended transformation, and does not limit the second instruction
4399 scheduler pass. */
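/* A hedged sketch of the transformation (insn and register numbers
   are made up, "expensive_op" stands for any pressure-increasing
   computation):

     (insn 7 ... (set (reg 140) (expensive_op)))
     ... many intervening insns ...
     (insn 25 ... (use (reg 140)))                <- first use

   becomes

     (insn 7 ... (set (reg 150) (expensive_op)))  <- new pseudo
     ... many intervening insns ...
     (insn 24 ... (set (reg 140) (reg 150)))      <- move before use
     (insn 25 ... (use (reg 140)))

   If reg 150 stays unallocated, move_unallocated_pseudos later deletes
   the move and sinks the computation into its place.  */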
4401 static void
4402 find_moveable_pseudos (void)
4404 unsigned i;
4405 int max_regs = max_reg_num ();
4406 int max_uid = get_max_uid ();
4407 basic_block bb;
4408 int *uid_luid = XNEWVEC (int, max_uid);
4409 rtx_insn **closest_uses = XNEWVEC (rtx_insn *, max_regs);
4410 /* A set of registers which are live but not modified throughout a block. */
4411 bitmap_head *bb_transp_live = XNEWVEC (bitmap_head,
4412 last_basic_block_for_fn (cfun));
4413 /* A set of registers which only exist in a given basic block. */
4414 bitmap_head *bb_local = XNEWVEC (bitmap_head,
4415 last_basic_block_for_fn (cfun));
4416 /* A set of registers which are set once, in an instruction that can be
4417 moved freely downwards, but are otherwise transparent to a block. */
4418 bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head,
4419 last_basic_block_for_fn (cfun));
4420 bitmap_head live, used, set, interesting, unusable_as_input;
4421 bitmap_iterator bi;
4422 bitmap_initialize (&interesting, 0);
4424 first_moveable_pseudo = max_regs;
4425 pseudo_replaced_reg.release ();
4426 pseudo_replaced_reg.safe_grow_cleared (max_regs);
4428 df_analyze ();
4429 calculate_dominance_info (CDI_DOMINATORS);
4431 i = 0;
4432 bitmap_initialize (&live, 0);
4433 bitmap_initialize (&used, 0);
4434 bitmap_initialize (&set, 0);
4435 bitmap_initialize (&unusable_as_input, 0);
4436 FOR_EACH_BB_FN (bb, cfun)
4438 rtx_insn *insn;
4439 bitmap transp = bb_transp_live + bb->index;
4440 bitmap moveable = bb_moveable_reg_sets + bb->index;
4441 bitmap local = bb_local + bb->index;
4443 bitmap_initialize (local, 0);
4444 bitmap_initialize (transp, 0);
4445 bitmap_initialize (moveable, 0);
4446 bitmap_copy (&live, df_get_live_out (bb));
4447 bitmap_and_into (&live, df_get_live_in (bb));
4448 bitmap_copy (transp, &live);
4449 bitmap_clear (moveable);
4450 bitmap_clear (&live);
4451 bitmap_clear (&used);
4452 bitmap_clear (&set);
4453 FOR_BB_INSNS (bb, insn)
4454 if (NONDEBUG_INSN_P (insn))
4456 df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4457 df_ref def, use;
4459 uid_luid[INSN_UID (insn)] = i++;
4461 def = df_single_def (insn_info);
4462 use = df_single_use (insn_info);
4463 if (use
4464 && def
4465 && DF_REF_REGNO (use) == DF_REF_REGNO (def)
4466 && !bitmap_bit_p (&set, DF_REF_REGNO (use))
4467 && rtx_moveable_p (&PATTERN (insn), OP_IN))
4469 unsigned regno = DF_REF_REGNO (use);
4470 bitmap_set_bit (moveable, regno);
4471 bitmap_set_bit (&set, regno);
4472 bitmap_set_bit (&used, regno);
4473 bitmap_clear_bit (transp, regno);
4474 continue;
4476 FOR_EACH_INSN_INFO_USE (use, insn_info)
4478 unsigned regno = DF_REF_REGNO (use);
4479 bitmap_set_bit (&used, regno);
4480 if (bitmap_clear_bit (moveable, regno))
4481 bitmap_clear_bit (transp, regno);
4484 FOR_EACH_INSN_INFO_DEF (def, insn_info)
4486 unsigned regno = DF_REF_REGNO (def);
4487 bitmap_set_bit (&set, regno);
4488 bitmap_clear_bit (transp, regno);
4489 bitmap_clear_bit (moveable, regno);
4494 bitmap_clear (&live);
4495 bitmap_clear (&used);
4496 bitmap_clear (&set);
4498 FOR_EACH_BB_FN (bb, cfun)
4500 bitmap local = bb_local + bb->index;
4501 rtx_insn *insn;
4503 FOR_BB_INSNS (bb, insn)
4504 if (NONDEBUG_INSN_P (insn))
4506 df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4507 rtx_insn *def_insn;
4508 rtx closest_use, note;
4509 df_ref def, use;
4510 unsigned regno;
4511 bool all_dominated, all_local;
4512 machine_mode mode;
4514 def = df_single_def (insn_info);
4515 /* There must be exactly one def in this insn. */
4516 if (!def || !single_set (insn))
4517 continue;
4518 /* This must be the only definition of the reg. We also limit
4519 which modes we deal with so that we can assume we can generate
4520 move instructions. */
4521 regno = DF_REF_REGNO (def);
4522 mode = GET_MODE (DF_REF_REG (def));
4523 if (DF_REG_DEF_COUNT (regno) != 1
4524 || !DF_REF_INSN_INFO (def)
4525 || HARD_REGISTER_NUM_P (regno)
4526 || DF_REG_EQ_USE_COUNT (regno) > 0
4527 || (!INTEGRAL_MODE_P (mode) && !FLOAT_MODE_P (mode)))
4528 continue;
4529 def_insn = DF_REF_INSN (def);
4531 for (note = REG_NOTES (def_insn); note; note = XEXP (note, 1))
4532 if (REG_NOTE_KIND (note) == REG_EQUIV && MEM_P (XEXP (note, 0)))
4533 break;
4535 if (note)
4537 if (dump_file)
4538 fprintf (dump_file, "Ignoring reg %d, has equiv memory\n",
4539 regno);
4540 bitmap_set_bit (&unusable_as_input, regno);
4541 continue;
4544 use = DF_REG_USE_CHAIN (regno);
4545 all_dominated = true;
4546 all_local = true;
4547 closest_use = NULL_RTX;
4548 for (; use; use = DF_REF_NEXT_REG (use))
4550 rtx_insn *insn;
4551 if (!DF_REF_INSN_INFO (use))
4553 all_dominated = false;
4554 all_local = false;
4555 break;
4557 insn = DF_REF_INSN (use);
4558 if (DEBUG_INSN_P (insn))
4559 continue;
4560 if (BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (def_insn))
4561 all_local = false;
4562 if (!insn_dominated_by_p (insn, def_insn, uid_luid))
4563 all_dominated = false;
4564 if (closest_use != insn && closest_use != const0_rtx)
4566 if (closest_use == NULL_RTX)
4567 closest_use = insn;
4568 else if (insn_dominated_by_p (closest_use, insn, uid_luid))
4569 closest_use = insn;
4570 else if (!insn_dominated_by_p (insn, closest_use, uid_luid))
4571 closest_use = const0_rtx;
4574 if (!all_dominated)
4576 if (dump_file)
4577 fprintf (dump_file, "Reg %d not all uses dominated by set\n",
4578 regno);
4579 continue;
4581 if (all_local)
4582 bitmap_set_bit (local, regno);
4583 if (closest_use == const0_rtx || closest_use == NULL
4584 || next_nonnote_nondebug_insn (def_insn) == closest_use)
4586 if (dump_file)
4587 fprintf (dump_file, "Reg %d uninteresting%s\n", regno,
4588 closest_use == const0_rtx || closest_use == NULL
4589 ? " (no unique first use)" : "");
4590 continue;
4592 if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
4594 if (dump_file)
4595 fprintf (dump_file, "Reg %d: closest user uses cc0\n",
4596 regno);
4597 continue;
4600 bitmap_set_bit (&interesting, regno);
4601 /* If we get here, we know closest_use is a non-NULL insn
4602 (as opposed to const0_rtx). */
4603 closest_uses[regno] = as_a <rtx_insn *> (closest_use);
4605 if (dump_file && (all_local || all_dominated))
4607 fprintf (dump_file, "Reg %u:", regno);
4608 if (all_local)
4609 fprintf (dump_file, " local to bb %d", bb->index);
4610 if (all_dominated)
4611 fprintf (dump_file, " def dominates all uses");
4612 if (closest_use != const0_rtx)
4613 fprintf (dump_file, " has unique first use");
4614 fputs ("\n", dump_file);
4619 EXECUTE_IF_SET_IN_BITMAP (&interesting, 0, i, bi)
4621 df_ref def = DF_REG_DEF_CHAIN (i);
4622 rtx_insn *def_insn = DF_REF_INSN (def);
4623 basic_block def_block = BLOCK_FOR_INSN (def_insn);
4624 bitmap def_bb_local = bb_local + def_block->index;
4625 bitmap def_bb_moveable = bb_moveable_reg_sets + def_block->index;
4626 bitmap def_bb_transp = bb_transp_live + def_block->index;
4627 bool local_to_bb_p = bitmap_bit_p (def_bb_local, i);
4628 rtx_insn *use_insn = closest_uses[i];
4629 df_ref use;
4630 bool all_ok = true;
4631 bool all_transp = true;
4633 if (!REG_P (DF_REF_REG (def)))
4634 continue;
4636 if (!local_to_bb_p)
4638 if (dump_file)
4639 fprintf (dump_file, "Reg %u not local to one basic block\n",
4641 continue;
4643 if (reg_equiv_init (i) != NULL_RTX)
4645 if (dump_file)
4646 fprintf (dump_file, "Ignoring reg %u with equiv init insn\n",
4648 continue;
4650 if (!rtx_moveable_p (&PATTERN (def_insn), OP_IN))
4652 if (dump_file)
4653 fprintf (dump_file, "Found def insn %d for %d to be not moveable\n",
4654 INSN_UID (def_insn), i);
4655 continue;
4657 if (dump_file)
4658 fprintf (dump_file, "Examining insn %d, def for %d\n",
4659 INSN_UID (def_insn), i);
4660 FOR_EACH_INSN_USE (use, def_insn)
4662 unsigned regno = DF_REF_REGNO (use);
4663 if (bitmap_bit_p (&unusable_as_input, regno))
4665 all_ok = false;
4666 if (dump_file)
4667 fprintf (dump_file, " found unusable input reg %u.\n", regno);
4668 break;
4670 if (!bitmap_bit_p (def_bb_transp, regno))
4672 if (bitmap_bit_p (def_bb_moveable, regno)
4673 && !control_flow_insn_p (use_insn)
4674 && (!HAVE_cc0 || !sets_cc0_p (use_insn)))
4676 if (modified_between_p (DF_REF_REG (use), def_insn, use_insn))
4678 rtx_insn *x = NEXT_INSN (def_insn);
4679 while (!modified_in_p (DF_REF_REG (use), x))
4681 gcc_assert (x != use_insn);
4682 x = NEXT_INSN (x);
4684 if (dump_file)
4685 fprintf (dump_file, " input reg %u modified but insn %d moveable\n",
4686 regno, INSN_UID (x));
4687 emit_insn_after (PATTERN (x), use_insn);
4688 set_insn_deleted (x);
4690 else
4692 if (dump_file)
4693 fprintf (dump_file, " input reg %u modified between def and use\n",
4694 regno);
4695 all_transp = false;
4698 else
4699 all_transp = false;
4702 if (!all_ok)
4703 continue;
4704 if (!dbg_cnt (ira_move))
4705 break;
4706 if (dump_file)
4707 fprintf (dump_file, " all ok%s\n", all_transp ? " and transp" : "");
4709 if (all_transp)
4711 rtx def_reg = DF_REF_REG (def);
4712 rtx newreg = ira_create_new_reg (def_reg);
4713 if (validate_change (def_insn, DF_REF_REAL_LOC (def), newreg, 0))
4715 unsigned nregno = REGNO (newreg);
4716 emit_insn_before (gen_move_insn (def_reg, newreg), use_insn);
4717 nregno -= max_regs;
4718 pseudo_replaced_reg[nregno] = def_reg;
4723 FOR_EACH_BB_FN (bb, cfun)
4725 bitmap_clear (bb_local + bb->index);
4726 bitmap_clear (bb_transp_live + bb->index);
4727 bitmap_clear (bb_moveable_reg_sets + bb->index);
4729 bitmap_clear (&interesting);
4730 bitmap_clear (&unusable_as_input);
4731 free (uid_luid);
4732 free (closest_uses);
4733 free (bb_local);
4734 free (bb_transp_live);
4735 free (bb_moveable_reg_sets);
4737 last_moveable_pseudo = max_reg_num ();
4739 fix_reg_equiv_init ();
4740 expand_reg_info ();
4741 regstat_free_n_sets_and_refs ();
4742 regstat_free_ri ();
4743 regstat_init_n_sets_and_refs ();
4744 regstat_compute_ri ();
4745 free_dominance_info (CDI_DOMINATORS);
4748 /* If the SET pattern SET is an assignment from a hard register to a pseudo
4749 which is live at CALL_DOM (if non-NULL, otherwise this check is omitted),
4750 return the destination. Otherwise return NULL. */
4752 static rtx
4753 interesting_dest_for_shprep_1 (rtx set, basic_block call_dom)
4755 rtx src = SET_SRC (set);
4756 rtx dest = SET_DEST (set);
4757 if (!REG_P (src) || !HARD_REGISTER_P (src)
4758 || !REG_P (dest) || HARD_REGISTER_P (dest)
4759 || (call_dom && !bitmap_bit_p (df_get_live_in (call_dom), REGNO (dest))))
4760 return NULL;
4761 return dest;
4764 /* If insn is interesting for parameter range-splitting shrink-wrapping
4765 preparation, i.e. it is a single set from a hard register to a pseudo, which
4766 is live at CALL_DOM (if non-NULL, otherwise this check is omitted), or a
4767 parallel statement with only one such statement, return the destination.
4768 Otherwise return NULL. */
4770 static rtx
4771 interesting_dest_for_shprep (rtx_insn *insn, basic_block call_dom)
4773 if (!INSN_P (insn))
4774 return NULL;
4775 rtx pat = PATTERN (insn);
4776 if (GET_CODE (pat) == SET)
4777 return interesting_dest_for_shprep_1 (pat, call_dom);
4779 if (GET_CODE (pat) != PARALLEL)
4780 return NULL;
4781 rtx ret = NULL;
4782 for (int i = 0; i < XVECLEN (pat, 0); i++)
4784 rtx sub = XVECEXP (pat, 0, i);
4785 if (GET_CODE (sub) == USE || GET_CODE (sub) == CLOBBER)
4786 continue;
4787 if (GET_CODE (sub) != SET
4788 || side_effects_p (sub))
4789 return NULL;
4790 rtx dest = interesting_dest_for_shprep_1 (sub, call_dom);
4791 if (dest && ret)
4792 return NULL;
4793 if (dest)
4794 ret = dest;
4796 return ret;
4799 /* Split the live ranges of pseudos that are loaded from hard registers in
4800 the first BB, at a BB that dominates all non-sibling calls, if such a BB
4801 can be found and is not in a loop. Return true if the function has made
4802 any changes. */
4804 static bool
4805 split_live_ranges_for_shrink_wrap (void)
4807 basic_block bb, call_dom = NULL;
4808 basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4809 rtx_insn *insn, *last_interesting_insn = NULL;
4810 bitmap_head need_new, reachable;
4811 vec<basic_block> queue;
4813 if (!SHRINK_WRAPPING_ENABLED)
4814 return false;
4816 bitmap_initialize (&need_new, 0);
4817 bitmap_initialize (&reachable, 0);
4818 queue.create (n_basic_blocks_for_fn (cfun));
4820 FOR_EACH_BB_FN (bb, cfun)
4821 FOR_BB_INSNS (bb, insn)
4822 if (CALL_P (insn) && !SIBLING_CALL_P (insn))
4824 if (bb == first)
4826 bitmap_clear (&need_new);
4827 bitmap_clear (&reachable);
4828 queue.release ();
4829 return false;
4832 bitmap_set_bit (&need_new, bb->index);
4833 bitmap_set_bit (&reachable, bb->index);
4834 queue.quick_push (bb);
4835 break;
4838 if (queue.is_empty ())
4840 bitmap_clear (&need_new);
4841 bitmap_clear (&reachable);
4842 queue.release ();
4843 return false;
4846 while (!queue.is_empty ())
4848 edge e;
4849 edge_iterator ei;
4851 bb = queue.pop ();
4852 FOR_EACH_EDGE (e, ei, bb->succs)
4853 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
4854 && bitmap_set_bit (&reachable, e->dest->index))
4855 queue.quick_push (e->dest);
4857 queue.release ();
4859 FOR_BB_INSNS (first, insn)
4861 rtx dest = interesting_dest_for_shprep (insn, NULL);
4862 if (!dest)
4863 continue;
4865 if (DF_REG_DEF_COUNT (REGNO (dest)) > 1)
4867 bitmap_clear (&need_new);
4868 bitmap_clear (&reachable);
4869 return false;
4872 for (df_ref use = DF_REG_USE_CHAIN (REGNO(dest));
4873 use;
4874 use = DF_REF_NEXT_REG (use))
4876 int ubbi = DF_REF_BB (use)->index;
4877 if (bitmap_bit_p (&reachable, ubbi))
4878 bitmap_set_bit (&need_new, ubbi);
4880 last_interesting_insn = insn;
4883 bitmap_clear (&reachable);
4884 if (!last_interesting_insn)
4886 bitmap_clear (&need_new);
4887 return false;
4890 call_dom = nearest_common_dominator_for_set (CDI_DOMINATORS, &need_new);
4891 bitmap_clear (&need_new);
4892 if (call_dom == first)
4893 return false;
4895 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
4896 while (bb_loop_depth (call_dom) > 0)
4897 call_dom = get_immediate_dominator (CDI_DOMINATORS, call_dom);
4898 loop_optimizer_finalize ();
4900 if (call_dom == first)
4901 return false;
4903 calculate_dominance_info (CDI_POST_DOMINATORS);
4904 if (dominated_by_p (CDI_POST_DOMINATORS, first, call_dom))
4906 free_dominance_info (CDI_POST_DOMINATORS);
4907 return false;
4909 free_dominance_info (CDI_POST_DOMINATORS);
4911 if (dump_file)
4912 fprintf (dump_file, "Will split live ranges of parameters at BB %i\n",
4913 call_dom->index);
4915 bool ret = false;
4916 FOR_BB_INSNS (first, insn)
4918 rtx dest = interesting_dest_for_shprep (insn, call_dom);
4919 if (!dest || dest == pic_offset_table_rtx)
4920 continue;
4922 rtx newreg = NULL_RTX;
4923 df_ref use, next;
4924 for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next)
4926 rtx_insn *uin = DF_REF_INSN (use);
4927 next = DF_REF_NEXT_REG (use);
4929 basic_block ubb = BLOCK_FOR_INSN (uin);
4930 if (ubb == call_dom
4931 || dominated_by_p (CDI_DOMINATORS, ubb, call_dom))
4933 if (!newreg)
4934 newreg = ira_create_new_reg (dest);
4935 validate_change (uin, DF_REF_REAL_LOC (use), newreg, true);
4939 if (newreg)
4941 rtx_insn *new_move = gen_move_insn (newreg, dest);
4942 emit_insn_after (new_move, bb_note (call_dom));
4943 if (dump_file)
4945 fprintf (dump_file, "Split live-range of register ");
4946 print_rtl_single (dump_file, dest);
4948 ret = true;
4951 if (insn == last_interesting_insn)
4952 break;
4954 apply_change_group ();
4955 return ret;
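/* Illustrative effect (hypothetical numbers): a parameter copied in
   the first BB by (set (reg 160) (reg:SI 5 di)) has its uses on
   call-reaching paths rewritten to a fresh pseudo, with
   (set (reg 170) (reg 160)) emitted at CALL_DOM.  Reg 160 may then sit
   in a call-clobbered hard register on the call-free path.  */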
4958 /* Perform the second half of the transformation started in
4959 find_moveable_pseudos. We look for instances where the newly introduced
4960 pseudo remains unallocated, and remove it by moving the definition to
4961 just before its use, replacing the move instruction generated by
4962 find_moveable_pseudos. */
4963 static void
4964 move_unallocated_pseudos (void)
4966 int i;
4967 for (i = first_moveable_pseudo; i < last_moveable_pseudo; i++)
4968 if (reg_renumber[i] < 0)
4970 int idx = i - first_moveable_pseudo;
4971 rtx other_reg = pseudo_replaced_reg[idx];
4972 rtx_insn *def_insn = DF_REF_INSN (DF_REG_DEF_CHAIN (i));
4973 /* The use must follow all definitions of OTHER_REG, so we can
4974 insert the new definition immediately after any of them. */
4975 df_ref other_def = DF_REG_DEF_CHAIN (REGNO (other_reg));
4976 rtx_insn *move_insn = DF_REF_INSN (other_def);
4977 rtx_insn *newinsn = emit_insn_after (PATTERN (def_insn), move_insn);
4978 rtx set;
4979 int success;
4981 if (dump_file)
4982 fprintf (dump_file, "moving def of %d (insn %d now) ",
4983 REGNO (other_reg), INSN_UID (def_insn));
4985 delete_insn (move_insn);
4986 while ((other_def = DF_REG_DEF_CHAIN (REGNO (other_reg))))
4987 delete_insn (DF_REF_INSN (other_def));
4988 delete_insn (def_insn);
4990 set = single_set (newinsn);
4991 success = validate_change (newinsn, &SET_DEST (set), other_reg, 0);
4992 gcc_assert (success);
4993 if (dump_file)
4994 fprintf (dump_file, " %d) rather than keep unallocated replacement %d\n",
4995 INSN_UID (newinsn), i);
4996 SET_REG_N_REFS (i, 0);
5000 /* If the backend knows where to allocate pseudos for hard
5001 register initial values, register these allocations now. */
5002 static void
5003 allocate_initial_values (void)
5005 if (targetm.allocate_initial_value)
5007 rtx hreg, preg, x;
5008 int i, regno;
5010 for (i = 0; HARD_REGISTER_NUM_P (i); i++)
5012 if (! initial_value_entry (i, &hreg, &preg))
5013 break;
5015 x = targetm.allocate_initial_value (hreg);
5016 regno = REGNO (preg);
5017 if (x && REG_N_SETS (regno) <= 1)
5019 if (MEM_P (x))
5020 reg_equiv_memory_loc (regno) = x;
5021 else
5023 basic_block bb;
5024 int new_regno;
5026 gcc_assert (REG_P (x));
5027 new_regno = REGNO (x);
5028 reg_renumber[regno] = new_regno;
5029 /* Poke the regno right into regno_reg_rtx so that even
5030 fixed regs are accepted. */
5031 SET_REGNO (preg, new_regno);
5032 /* Update global register liveness information. */
5033 FOR_EACH_BB_FN (bb, cfun)
5035 if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
5036 SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
5037 if (REGNO_REG_SET_P (df_get_live_out (bb), regno))
5038 SET_REGNO_REG_SET (df_get_live_out (bb), new_regno);
5044 gcc_checking_assert (! initial_value_entry (FIRST_PSEUDO_REGISTER,
5045 &hreg, &preg));
5050 /* True when we use LRA instead of the reload pass for the current
5051 function. */
5052 bool ira_use_lra_p;
5054 /* True if we have allocno conflicts. It is false for non-optimized
5055 mode or when the conflict table is too big. */
5056 bool ira_conflicts_p;
5058 /* Saved between IRA and reload. */
5059 static int saved_flag_ira_share_spill_slots;
5061 /* This is the main entry of IRA. */
5062 static void
5063 ira (FILE *f)
5065 bool loops_p;
5066 int ira_max_point_before_emit;
5067 bool saved_flag_caller_saves = flag_caller_saves;
5068 enum ira_region saved_flag_ira_region = flag_ira_region;
5070 /* Perform target specific PIC register initialization. */
5071 targetm.init_pic_reg ();
5073 ira_conflicts_p = optimize > 0;
5075 ira_use_lra_p = targetm.lra_p ();
5076 /* If there are too many pseudos and/or basic blocks (e.g. 10K
5077 pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
5078 use simplified and faster algorithms in LRA. */
5079 lra_simple_p
5080 = (ira_use_lra_p
5081 && max_reg_num () >= (1 << 26) / last_basic_block_for_fn (cfun));
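/* Worked example of the threshold above (figures illustrative): with
   1024 basic blocks, (1 << 26) / 1024 == 65536, so the simplified
   algorithms are used once the function has 64K or more pseudos.  */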
5082 if (lra_simple_p)
5084 /* This permits us to skip live range splitting in LRA. */
5085 flag_caller_saves = false;
5086 /* There is no point in doing regional allocation when we use
5087 simplified LRA. */
5088 flag_ira_region = IRA_REGION_ONE;
5089 ira_conflicts_p = false;
5092 #ifndef IRA_NO_OBSTACK
5093 gcc_obstack_init (&ira_obstack);
5094 #endif
5095 bitmap_obstack_initialize (&ira_bitmap_obstack);
5097 /* LRA uses its own infrastructure to handle caller save registers. */
5098 if (flag_caller_saves && !ira_use_lra_p)
5099 init_caller_save ();
5101 if (flag_ira_verbose < 10)
5103 internal_flag_ira_verbose = flag_ira_verbose;
5104 ira_dump_file = f;
5106 else
5108 internal_flag_ira_verbose = flag_ira_verbose - 10;
5109 ira_dump_file = stderr;
5112 setup_prohibited_mode_move_regs ();
5113 decrease_live_ranges_number ();
5114 df_note_add_problem ();
5116 /* DF_LIVE can't be used in the register allocator, too many other
5117 parts of the compiler depend on using the "classic" liveness
5118 interpretation of the DF_LR problem. See PR38711.
5119 Remove the problem, so that we don't spend time updating it in
5120 any of the df_analyze() calls during IRA/LRA. */
5121 if (optimize > 1)
5122 df_remove_problem (df_live);
5123 gcc_checking_assert (df_live == NULL);
5125 if (flag_checking)
5126 df->changeable_flags |= DF_VERIFY_SCHEDULED;
5128 df_analyze ();
5130 init_reg_equiv ();
5131 if (ira_conflicts_p)
5133 calculate_dominance_info (CDI_DOMINATORS);
5135 if (split_live_ranges_for_shrink_wrap ())
5136 df_analyze ();
5138 free_dominance_info (CDI_DOMINATORS);
5141 df_clear_flags (DF_NO_INSN_RESCAN);
5143 indirect_jump_optimize ();
5144 if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
5145 df_analyze ();
5147 regstat_init_n_sets_and_refs ();
5148 regstat_compute_ri ();
5150 /* If we are not optimizing, then this is the only place before
5151 register allocation where dataflow is done. And that is needed
5152 to generate these warnings. */
5153 if (warn_clobbered)
5154 generate_setjmp_warnings ();
5156 /* Determine if the current function is a leaf before running IRA
5157 since this can impact optimizations done by the prologue and
5158 epilogue thus changing register elimination offsets. */
5159 crtl->is_leaf = leaf_function_p ();
5161 if (resize_reg_info () && flag_ira_loop_pressure)
5162 ira_set_pseudo_classes (true, ira_dump_file);
5164 init_alias_analysis ();
5165 reg_equiv = XCNEWVEC (struct equivalence, max_reg_num ());
5166 update_equiv_regs ();
5168 /* Don't move insns if live range shrinkage or register
5169 pressure-sensitive scheduling was done, because doing so will not
5170 improve allocation but will likely worsen insn scheduling. */
5171 if (optimize
5172 && !flag_live_range_shrinkage
5173 && !(flag_sched_pressure && flag_schedule_insns))
5174 combine_and_move_insns ();
5176 /* Gather additional equivalences with memory. */
5177 if (optimize)
5178 add_store_equivs ();
5180 end_alias_analysis ();
5181 free (reg_equiv);
5183 setup_reg_equiv ();
5184 grow_reg_equivs ();
5185 setup_reg_equiv_init ();
5187 allocated_reg_info_size = max_reg_num ();
5189 /* It is not worth doing such an improvement when we use simple
5190 allocation, either because of -O0 usage or because the function
5191 is too big. */
5192 if (ira_conflicts_p)
5193 find_moveable_pseudos ();
5195 max_regno_before_ira = max_reg_num ();
5196 ira_setup_eliminable_regset ();
5198 ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
5199 ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
5200 ira_move_loops_num = ira_additional_jumps_num = 0;
5202 ira_assert (current_loops == NULL);
5203 if (flag_ira_region == IRA_REGION_ALL || flag_ira_region == IRA_REGION_MIXED)
5204 loop_optimizer_init (AVOID_CFG_MODIFICATIONS | LOOPS_HAVE_RECORDED_EXITS);
5206 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5207 fprintf (ira_dump_file, "Building IRA IR\n");
5208 loops_p = ira_build ();
5210 ira_assert (ira_conflicts_p || !loops_p);
5212 saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
5213 if (too_high_register_pressure_p () || cfun->calls_setjmp)
5214 /* It just wastes the compiler's time to pack spilled pseudos into
5215 stack slots in this case -- prohibit it. We also do this if
5216 there is a setjmp call: the compiler is required to preserve the
5217 value of a variable not modified between setjmp and longjmp,
5218 and sharing slots does not guarantee that. */
5219 flag_ira_share_spill_slots = FALSE;
5221 ira_color ();
5223 ira_max_point_before_emit = ira_max_point;
5225 ira_initiate_emit_data ();
5227 ira_emit (loops_p);
5229 max_regno = max_reg_num ();
5230 if (ira_conflicts_p)
5232 if (! loops_p)
5234 if (! ira_use_lra_p)
5235 ira_initiate_assign ();
5237 else
5239 expand_reg_info ();
5241 if (ira_use_lra_p)
5243 ira_allocno_t a;
5244 ira_allocno_iterator ai;
5246 FOR_EACH_ALLOCNO (a, ai)
5248 int old_regno = ALLOCNO_REGNO (a);
5249 int new_regno = REGNO (ALLOCNO_EMIT_DATA (a)->reg);
5251 ALLOCNO_REGNO (a) = new_regno;
5253 if (old_regno != new_regno)
5254 setup_reg_classes (new_regno, reg_preferred_class (old_regno),
5255 reg_alternate_class (old_regno),
5256 reg_allocno_class (old_regno));
5260 else
5262 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5263 fprintf (ira_dump_file, "Flattening IR\n");
5264 ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
5266 /* New insns were generated: add notes and recalculate live
5267 info. */
5268 df_analyze ();
5270 /* ??? Rebuild the loop tree, but why? Does the loop tree
5271 change if new insns were generated? Can that be handled
5272 by updating the loop tree incrementally? */
5273 loop_optimizer_finalize ();
5274 free_dominance_info (CDI_DOMINATORS);
5275 loop_optimizer_init (AVOID_CFG_MODIFICATIONS
5276 | LOOPS_HAVE_RECORDED_EXITS);
5278 if (! ira_use_lra_p)
5280 setup_allocno_assignment_flags ();
5281 ira_initiate_assign ();
5282 ira_reassign_conflict_allocnos (max_regno);
5287 ira_finish_emit_data ();
5289 setup_reg_renumber ();
5291 calculate_allocation_cost ();
5293 #ifdef ENABLE_IRA_CHECKING
5294 if (ira_conflicts_p)
5295 check_allocation ();
5296 #endif
5298 if (max_regno != max_regno_before_ira)
5300 regstat_free_n_sets_and_refs ();
5301 regstat_free_ri ();
5302 regstat_init_n_sets_and_refs ();
5303 regstat_compute_ri ();
5306 overall_cost_before = ira_overall_cost;
5307 if (! ira_conflicts_p)
5308 grow_reg_equivs ();
5309 else
5311 fix_reg_equiv_init ();
5313 #ifdef ENABLE_IRA_CHECKING
5314 print_redundant_copies ();
5315 #endif
5316 if (! ira_use_lra_p)
5318 ira_spilled_reg_stack_slots_num = 0;
5319 ira_spilled_reg_stack_slots
5320 = ((struct ira_spilled_reg_stack_slot *)
5321 ira_allocate (max_regno
5322 * sizeof (struct ira_spilled_reg_stack_slot)));
5323 memset (ira_spilled_reg_stack_slots, 0,
5324 max_regno * sizeof (struct ira_spilled_reg_stack_slot));
5327 allocate_initial_values ();
5329 /* See comment for find_moveable_pseudos call. */
5330 if (ira_conflicts_p)
5331 move_unallocated_pseudos ();
5333 /* Restore original values. */
5334 if (lra_simple_p)
5336 flag_caller_saves = saved_flag_caller_saves;
5337 flag_ira_region = saved_flag_ira_region;
5341 static void
5342 do_reload (void)
5344 basic_block bb;
5345 bool need_dce;
5346 unsigned pic_offset_table_regno = INVALID_REGNUM;
5348 if (flag_ira_verbose < 10)
5349 ira_dump_file = dump_file;
5351 /* If pic_offset_table_rtx is a pseudo register, then keep it a
5352 pseudo after reload to avoid possible wrong uses of the hard reg
5353 assigned to it. */
5354 if (pic_offset_table_rtx
5355 && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
5356 pic_offset_table_regno = REGNO (pic_offset_table_rtx);
5358 timevar_push (TV_RELOAD);
5359 if (ira_use_lra_p)
5361 if (current_loops != NULL)
5363 loop_optimizer_finalize ();
5364 free_dominance_info (CDI_DOMINATORS);
5366 FOR_ALL_BB_FN (bb, cfun)
5367 bb->loop_father = NULL;
5368 current_loops = NULL;
5370 ira_destroy ();
5372 lra (ira_dump_file);
5373 /* ???!!! Move it before lra () when we use ira_reg_equiv in
5374 LRA. */
5375 vec_free (reg_equivs);
5376 reg_equivs = NULL;
5377 need_dce = false;
5379 else
5381 df_set_flags (DF_NO_INSN_RESCAN);
5382 build_insn_chain ();
5384 need_dce = reload (get_insns (), ira_conflicts_p);
5387 timevar_pop (TV_RELOAD);
5389 timevar_push (TV_IRA);
5391 if (ira_conflicts_p && ! ira_use_lra_p)
5393 ira_free (ira_spilled_reg_stack_slots);
5394 ira_finish_assign ();
5397 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
5398 && overall_cost_before != ira_overall_cost)
5399 fprintf (ira_dump_file, "+++Overall after reload %" PRId64 "\n",
5400 ira_overall_cost);
5402 flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
5404 if (! ira_use_lra_p)
5406 ira_destroy ();
5407 if (current_loops != NULL)
5409 loop_optimizer_finalize ();
5410 free_dominance_info (CDI_DOMINATORS);
5412 FOR_ALL_BB_FN (bb, cfun)
5413 bb->loop_father = NULL;
5414 current_loops = NULL;
5416 regstat_free_ri ();
5417 regstat_free_n_sets_and_refs ();
5420 if (optimize)
5421 cleanup_cfg (CLEANUP_EXPENSIVE);
5423 finish_reg_equiv ();
5425 bitmap_obstack_release (&ira_bitmap_obstack);
5426 #ifndef IRA_NO_OBSTACK
5427 obstack_free (&ira_obstack, NULL);
5428 #endif
5430 /* The code after the reload has changed so much that at this point
5431 we might as well just rescan everything. Note that
5432 df_rescan_all_insns is not going to help here because it does not
5433 touch the artificial uses and defs. */
5434 df_finish_pass (true);
5435 df_scan_alloc (NULL);
5436 df_scan_blocks ();
5438 if (optimize > 1)
5440 df_live_add_problem ();
5441 df_live_set_all_dirty ();
5444 if (optimize)
5445 df_analyze ();
5447 if (need_dce && optimize)
5448 run_fast_dce ();
5450 /* Diagnose uses of the hard frame pointer when it is used as a global
5451 register. Often we can get away with letting the user appropriate
5452 the frame pointer, but we should let them know when code generation
5453 makes that impossible. */
5454 if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
5456 tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
5457 error_at (DECL_SOURCE_LOCATION (current_function_decl),
5458 "frame pointer required, but reserved");
5459 inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
5462 /* If we are doing generic stack checking, give a warning if this
5463 function's frame size is larger than we expect. */
5464 if (flag_stack_check == GENERIC_STACK_CHECK)
5466 HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
5468 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5469 if (df_regs_ever_live_p (i) && !fixed_regs[i] && call_used_regs[i])
5470 size += UNITS_PER_WORD;
5472 if (size > STACK_CHECK_MAX_FRAME_SIZE)
5473 warning (0, "frame size too large for reliable stack checking");
5476 if (pic_offset_table_regno != INVALID_REGNUM)
5477 pic_offset_table_rtx = gen_rtx_REG (Pmode, pic_offset_table_regno);
5479 timevar_pop (TV_IRA);
5482 /* Run the integrated register allocator. */
5484 namespace {
5486 const pass_data pass_data_ira =
5488 RTL_PASS, /* type */
5489 "ira", /* name */
5490 OPTGROUP_NONE, /* optinfo_flags */
5491 TV_IRA, /* tv_id */
5492 0, /* properties_required */
5493 0, /* properties_provided */
5494 0, /* properties_destroyed */
5495 0, /* todo_flags_start */
5496 TODO_do_not_ggc_collect, /* todo_flags_finish */
5499 class pass_ira : public rtl_opt_pass
5501 public:
5502 pass_ira (gcc::context *ctxt)
5503 : rtl_opt_pass (pass_data_ira, ctxt)
5506 /* opt_pass methods: */
5507 virtual bool gate (function *)
5509 return !targetm.no_register_allocation;
5511 virtual unsigned int execute (function *)
5513 ira (dump_file);
5514 return 0;
5517 }; // class pass_ira
5519 } // anon namespace
5521 rtl_opt_pass *
5522 make_pass_ira (gcc::context *ctxt)
5524 return new pass_ira (ctxt);
5527 namespace {
5529 const pass_data pass_data_reload =
5531 RTL_PASS, /* type */
5532 "reload", /* name */
5533 OPTGROUP_NONE, /* optinfo_flags */
5534 TV_RELOAD, /* tv_id */
5535 0, /* properties_required */
5536 0, /* properties_provided */
5537 0, /* properties_destroyed */
5538 0, /* todo_flags_start */
5539 0, /* todo_flags_finish */
5542 class pass_reload : public rtl_opt_pass
5544 public:
5545 pass_reload (gcc::context *ctxt)
5546 : rtl_opt_pass (pass_data_reload, ctxt)
5549 /* opt_pass methods: */
5550 virtual bool gate (function *)
5552 return !targetm.no_register_allocation;
5554 virtual unsigned int execute (function *)
5556 do_reload ();
5557 return 0;
5560 }; // class pass_reload
5562 } // anon namespace
5564 rtl_opt_pass *
5565 make_pass_reload (gcc::context *ctxt)
5567 return new pass_reload (ctxt);