/* gcc/lra-spills.c */

/* Change pseudos by memory.
   Copyright (C) 2010-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains code for a pass to change spilled pseudos into
   memory.

   The pass creates the necessary stack slots and assigns spilled
   pseudos to the stack slots in the following way:

   for all spilled pseudos P most frequently used first do
     for all stack slots S do
       if P doesn't conflict with pseudos assigned to S then
         assign S to P and go on to process the next pseudo
       end
     end
     create new stack slot S and assign P to S
   end

   The actual algorithm is a bit more complicated because of different
   pseudo sizes.
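
   For example, suppose pseudos p1, p2 and p3 are spilled, listed
   most frequently used first, and that the live ranges of p1 and p3
   do not intersect while p2 conflicts with both of them.  Then p1
   gets a new slot S0, p2 gets a new slot S1, and p3 is assigned to
   S0 again (assuming its size fits), so only two stack slots are
   created for the three pseudos.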

   After that the code replaces spilled pseudos (except ones created
   from scratches) with the corresponding stack slot memory in RTL.

   If at least one stack slot was created, we need to run more passes
   because we have new addresses which should be checked and because
   the old address displacements might change and address constraints
   (or insn memory constraints) might not be satisfied any more.

   For some targets, the pass can spill some pseudos into hard
   registers of a different class (usually into vector registers)
   instead of spilling them into memory if it is possible and
   profitable.  Spilling a GENERAL_REGS pseudo into SSE registers on
   Intel Core i7 is an example of such an optimization, and it is
   actually recommended by the Intel optimization guide.

   The file also contains code for the final replacement of pseudos
   by the hard registers assigned to them.  */
58 #include "config.h"
59 #include "system.h"
60 #include "coretypes.h"
61 #include "backend.h"
62 #include "tree.h"
63 #include "rtl.h"
64 #include "df.h"
65 #include "tm_p.h"
66 #include "insn-config.h"
67 #include "recog.h"
68 #include "output.h"
69 #include "regs.h"
70 #include "flags.h"
71 #include "alias.h"
72 #include "expmed.h"
73 #include "dojump.h"
74 #include "explow.h"
75 #include "calls.h"
76 #include "emit-rtl.h"
77 #include "varasm.h"
78 #include "stmt.h"
79 #include "expr.h"
80 #include "cfgrtl.h"
81 #include "except.h"
82 #include "timevar.h"
83 #include "target.h"
84 #include "alloc-pool.h"
85 #include "lra.h"
86 #include "insn-attr.h"
87 #include "insn-codes.h"
88 #include "lra-int.h"
89 #include "ira.h"


/* Max regno at the start of the pass.  */
static int regs_num;

/* Map spilled regno -> hard regno used instead of memory for
   spilling.  */
static rtx *spill_hard_reg;

/* The structure describes the stack slot of a spilled pseudo.  */
struct pseudo_slot
{
  /* Number (0, 1, ...) of the stack slot to which the given pseudo
     belongs.  */
  int slot_num;
  /* First or next slot with the same slot number.  */
  struct pseudo_slot *next, *first;
  /* Memory representing the spilled pseudo.  */
  rtx mem;
};

/* The stack slots for each spilled pseudo.  Indexed by regnos.  */
static struct pseudo_slot *pseudo_slots;

/* The structure describes a register or a stack slot which can be
   used for several spilled pseudos.  */
struct slot
{
  /* First pseudo with the given stack slot.  */
  int regno;
  /* Hard reg into which the slot pseudos are spilled.  The value is
     negative for pseudos spilled into memory.  */
  int hard_regno;
  /* Memory representing the whole stack slot.  It can be different
     from the memory representing a pseudo belonging to the given
     stack slot because the pseudo can be placed in only a part of
     the corresponding stack slot.  The value is NULL for pseudos
     spilled into a hard reg.  */
  rtx mem;
  /* Combined live ranges of all pseudos belonging to the given slot.
     It is used to figure out that a new spilled pseudo can use the
     given stack slot.  */
  lra_live_range_t live_ranges;
};

/* Array containing info about the stack slots.  The array element is
   indexed by the stack slot number in the range [0..slots_num).  */
static struct slot *slots;
/* The number of the stack slots currently existing.  */
static int slots_num;

/* Set up memory of the spilled pseudo I.  The function can allocate
   the corresponding stack slot if it is not done yet.  */
static void
assign_mem_slot (int i)
{
  rtx x = NULL_RTX;
  machine_mode mode = GET_MODE (regno_reg_rtx[i]);
  unsigned int inherent_size = PSEUDO_REGNO_BYTES (i);
  unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
  unsigned int max_ref_width = GET_MODE_SIZE (lra_reg_info[i].biggest_mode);
  unsigned int total_size = MAX (inherent_size, max_ref_width);
  unsigned int min_align = max_ref_width * BITS_PER_UNIT;
  int adjust = 0;

  lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
              && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);

  x = slots[pseudo_slots[i].slot_num].mem;

  /* We can use a slot already allocated because it is guaranteed the
     slot provides both enough inherent space and enough total
     space.  */
  if (x)
    ;
  /* Each pseudo has an inherent size which comes from its own mode,
     and a total size which provides room for paradoxical subregs
     which refer to the pseudo reg in wider modes.  We allocate a new
     slot, making sure that it has enough inherent space and total
     space.  */
  else
    {
      rtx stack_slot;

      /* No known place to spill from => no slot to reuse.  */
      x = assign_stack_local (mode, total_size,
                              min_align > inherent_align
                              || total_size > inherent_size ? -1 : 0);
      stack_slot = x;
      /* Cancel the big-endian correction done in assign_stack_local.
         Get the address of the beginning of the slot.  This is so we
         can do a big-endian correction unconditionally below.  */
      if (BYTES_BIG_ENDIAN)
        {
          adjust = inherent_size - total_size;
          if (adjust)
            stack_slot
              = adjust_address_nv (x,
                                   mode_for_size (total_size * BITS_PER_UNIT,
                                                  MODE_INT, 1),
                                   adjust);
        }
      slots[pseudo_slots[i].slot_num].mem = stack_slot;
    }

  /* On a big endian machine, the "address" of the slot is the address
     of the low part that fits its inherent mode.  */
  if (BYTES_BIG_ENDIAN && inherent_size < total_size)
    adjust += (total_size - inherent_size);

  x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);

  /* Set all of the memory attributes as appropriate for a spill.  */
  set_mem_attrs_for_spill (x);
  pseudo_slots[i].mem = x;
}

/* Sort pseudos according to their usage frequencies.  */
static int
regno_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff;

  if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0)
    return diff;
  return regno1 - regno2;
}

/* Sort pseudos according to their slots, putting the slots in the order
   that they should be allocated.  Slots with lower numbers have the highest
   priority and should get the smallest displacement from the stack or
   frame pointer (whichever is being used).

   The first allocated slot is always closest to the frame pointer,
   so prefer lower slot numbers when frame_pointer_needed.  If the stack
   and frame grow in the same direction, then the first allocated slot is
   always closest to the initial stack pointer and furthest away from the
   final stack pointer, so allocate higher numbers first when using the
   stack pointer in that case.  The reverse is true if the stack and
   frame grow in opposite directions.  */
static int
pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  slot_num1 = pseudo_slots[regno1].slot_num;
  slot_num2 = pseudo_slots[regno2].slot_num;
  if ((diff = slot_num1 - slot_num2) != 0)
    return (frame_pointer_needed
            || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
  total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}

/* Assign spill hard registers to N pseudos in PSEUDO_REGNOS which is
   sorted in order of highest frequency first.  Put the pseudos which
   did not get a spill hard register at the beginning of array
   PSEUDO_REGNOS.  Return the number of such pseudos.  */
static int
assign_spill_hard_regs (int *pseudo_regnos, int n)
{
  int i, k, p, regno, res, spill_class_size, hard_regno, nr;
  enum reg_class rclass, spill_class;
  machine_mode mode;
  lra_live_range_t r;
  rtx_insn *insn;
  rtx set;
  basic_block bb;
  HARD_REG_SET conflict_hard_regs;
  bitmap_head ok_insn_bitmap;
  bitmap setjump_crosses = regstat_get_setjmp_crosses ();
  /* Hard registers which can not be used for any purpose at given
     program point because they are unallocatable or already allocated
     for other pseudos.  */
  HARD_REG_SET *reserved_hard_regs;

  if (! lra_reg_spill_p)
    return n;
  /* Set up reserved hard regs for every program point.  */
  reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
  for (p = 0; p < lra_live_max_point; p++)
    COPY_HARD_REG_SET (reserved_hard_regs[p], lra_no_alloc_regs);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0
        && (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
        for (p = r->start; p <= r->finish; p++)
          add_to_hard_reg_set (&reserved_hard_regs[p],
                               lra_reg_info[i].biggest_mode, hard_regno);
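  /* Collect insns in which a spill hard register may legitimately
     appear: debug insns and plain register-to-register moves.  A
     pseudo referenced by any other kind of insn will be spilled into
     memory instead.  */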
  bitmap_initialize (&ok_insn_bitmap, &reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (DEBUG_INSN_P (insn)
          || ((set = single_set (insn)) != NULL_RTX
              && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))))
        bitmap_set_bit (&ok_insn_bitmap, INSN_UID (insn));
  for (res = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      rclass = lra_get_allocno_class (regno);
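      /* A pseudo living across a setjmp cannot use a spill hard
         register: hard register contents are not guaranteed to
         survive a longjmp.  */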
      if (bitmap_bit_p (setjump_crosses, regno)
          || (spill_class
              = ((enum reg_class)
                 targetm.spill_class ((reg_class_t) rclass,
                                      PSEUDO_REGNO_MODE (regno)))) == NO_REGS
          || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap,
                                       &ok_insn_bitmap))
        {
          pseudo_regnos[res++] = regno;
          continue;
        }
      lra_assert (spill_class != NO_REGS);
      COPY_HARD_REG_SET (conflict_hard_regs,
                         lra_reg_info[regno].conflict_hard_regs);
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
        for (p = r->start; p <= r->finish; p++)
          IOR_HARD_REG_SET (conflict_hard_regs, reserved_hard_regs[p]);
      spill_class_size = ira_class_hard_regs_num[spill_class];
      mode = lra_reg_info[regno].biggest_mode;
      for (k = 0; k < spill_class_size; k++)
        {
          hard_regno = ira_class_hard_regs[spill_class][k];
          if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno))
            break;
        }
      if (k >= spill_class_size)
        {
          /* There are no available regs -- assign memory later.  */
          pseudo_regnos[res++] = regno;
          continue;
        }
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, "  Spill r%d into hr%d\n", regno, hard_regno);
      /* Update reserved_hard_regs.  */
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
        for (p = r->start; p <= r->finish; p++)
          add_to_hard_reg_set (&reserved_hard_regs[p],
                               lra_reg_info[regno].biggest_mode, hard_regno);
      spill_hard_reg[regno]
        = gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
      for (nr = 0;
           nr < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
           nr++)
        /* Just loop.  */
        df_set_regs_ever_live (hard_regno + nr, true);
    }
  bitmap_clear (&ok_insn_bitmap);
  free (reserved_hard_regs);
  return res;
}

/* Add pseudo REGNO to slot SLOT_NUM.  */
static void
add_pseudo_to_slot (int regno, int slot_num)
{
  struct pseudo_slot *first;

  if (slots[slot_num].regno < 0)
    {
      /* It is the first pseudo in the slot.  */
      slots[slot_num].regno = regno;
      pseudo_slots[regno].first = &pseudo_slots[regno];
      pseudo_slots[regno].next = NULL;
    }
  else
    {
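      /* The slot already has pseudos: point FIRST at the head of the
         list (the first pseudo assigned to the slot) and splice the
         new pseudo in right after it, so insertion is O(1).  */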
      first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno];
      pseudo_slots[regno].next = first->next;
      first->next = &pseudo_slots[regno];
    }
  pseudo_slots[regno].mem = NULL_RTX;
  pseudo_slots[regno].slot_num = slot_num;
  slots[slot_num].live_ranges
    = lra_merge_live_ranges (slots[slot_num].live_ranges,
                             lra_copy_live_range_list
                             (lra_reg_info[regno].live_ranges));
}

/* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of
   length N.  Sort pseudos in PSEUDO_REGNOS for subsequent assigning
   memory stack slots.  */
static void
assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
{
  int i, j, regno;

  slots_num = 0;
  /* Assign stack slot numbers to spilled pseudos, use smaller numbers
     for most frequently used pseudos.  */
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      if (! flag_ira_share_spill_slots)
        j = slots_num;
      else
        {
          for (j = 0; j < slots_num; j++)
            if (slots[j].hard_regno < 0
                && ! (lra_intersected_live_ranges_p
                      (slots[j].live_ranges,
                       lra_reg_info[regno].live_ranges)))
              break;
        }
      if (j >= slots_num)
        {
          /* New slot.  */
          slots[j].live_ranges = NULL;
          slots[j].regno = slots[j].hard_regno = -1;
          slots[j].mem = NULL_RTX;
          slots_num++;
        }
      add_pseudo_to_slot (regno, j);
    }
  /* Sort regnos according to their slot numbers.  */
  qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare);
}

/* Recursively process LOC in INSN and change spilled pseudos to the
   corresponding memory or spilled hard reg.  Ignore spilled pseudos
   created from the scratches.  */
static void
remove_pseudos (rtx *loc, rtx_insn *insn)
{
  int i;
  rtx hard_reg;
  const char *fmt;
  enum rtx_code code;

  if (*loc == NULL_RTX)
    return;
  code = GET_CODE (*loc);
  if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
      && lra_get_regno_hard_regno (i) < 0
      /* We do not want to assign memory for former scratches because
         it might result in an address reload for some targets.  In
         any case we transform such pseudos not getting hard registers
         into scratches back.  */
      && ! lra_former_scratch_p (i))
    {
      if ((hard_reg = spill_hard_reg[i]) != NULL_RTX)
        *loc = copy_rtx (hard_reg);
      else
        {
          rtx x = lra_eliminate_regs_1 (insn, pseudo_slots[i].mem,
                                        GET_MODE (pseudo_slots[i].mem),
                                        false, false, 0, true);
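          /* If elimination left the slot memory unchanged, install a
             copy so that distinct insns do not share the same MEM
             rtx.  */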
          *loc = x != pseudo_slots[i].mem ? x : copy_rtx (x);
        }
      return;
    }

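  /* Not a spilled pseudo: walk all operands of *LOC, recursing into
     subexpressions ('e') and rtx vectors ('E') according to the rtx
     format string.  */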
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        remove_pseudos (&XEXP (*loc, i), insn);
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
            remove_pseudos (&XVECEXP (*loc, i, j), insn);
        }
    }
}

/* Convert spilled pseudos into their stack slots or spill hard regs,
   put insns to process on the constraint stack (that is all insns in
   which pseudos were changed to memory or spill hard regs).  */
static void
spill_pseudos (void)
{
  basic_block bb;
  rtx_insn *insn;
  int i;
  bitmap_head spilled_pseudos, changed_insns;

  bitmap_initialize (&spilled_pseudos, &reg_obstack);
  bitmap_initialize (&changed_insns, &reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
          && ! lra_former_scratch_p (i))
        {
          bitmap_set_bit (&spilled_pseudos, i);
          bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
        }
    }
  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
        if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
          {
            rtx *link_loc, link;
            remove_pseudos (&PATTERN (insn), insn);
            if (CALL_P (insn))
              remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
            for (link_loc = &REG_NOTES (insn);
                 (link = *link_loc) != NULL_RTX;
                 link_loc = &XEXP (link, 1))
              {
                switch (REG_NOTE_KIND (link))
                  {
                  case REG_FRAME_RELATED_EXPR:
                  case REG_CFA_DEF_CFA:
                  case REG_CFA_ADJUST_CFA:
                  case REG_CFA_OFFSET:
                  case REG_CFA_REGISTER:
                  case REG_CFA_EXPRESSION:
                  case REG_CFA_RESTORE:
                  case REG_CFA_SET_VDRAP:
                    remove_pseudos (&XEXP (link, 0), insn);
                    break;
                  default:
                    break;
                  }
              }
            if (lra_dump_file != NULL)
              fprintf (lra_dump_file,
                       "Changing spilled pseudos to memory in insn #%u\n",
                       INSN_UID (insn));
            lra_push_insn (insn);
            if (lra_reg_spill_p || targetm.different_addr_displacement_p ())
              lra_set_used_insn_alternative (insn, -1);
          }
        else if (CALL_P (insn))
          /* Presence of any pseudo in CALL_INSN_FUNCTION_USAGE does
             not affect value of insn_bitmap of the corresponding
             lra_reg_info.  That is because we don't need to reload
             pseudos in CALL_INSN_FUNCTION_USAGEs.  So if we process
             only insns in the insn_bitmap of given pseudo here, we
             can miss the pseudo in some
             CALL_INSN_FUNCTION_USAGEs.  */
          remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
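      /* The spilled pseudos now live in memory, so remove them from
         the block's DF live-in and live-out sets.  */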
      bitmap_and_compl_into (df_get_live_in (bb), &spilled_pseudos);
      bitmap_and_compl_into (df_get_live_out (bb), &spilled_pseudos);
    }
  bitmap_clear (&spilled_pseudos);
  bitmap_clear (&changed_insns);
}

/* Return true if we need to change some pseudos into memory.  */
bool
lra_need_for_spills_p (void)
{
  int i;

  max_regno = max_reg_num ();
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
        && ! lra_former_scratch_p (i))
      return true;
  return false;
}

/* Change spilled pseudos into memory or spill hard regs.  Put changed
   insns on the constraint stack (these insns will be considered on
   the next constraint pass).  The changed insns are all insns in
   which pseudos were changed.  */
void
lra_spill (void)
{
  int i, n, curr_regno;
  int *pseudo_regnos;

  regs_num = max_reg_num ();
  spill_hard_reg = XNEWVEC (rtx, regs_num);
  pseudo_regnos = XNEWVEC (int, regs_num);
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
        /* We do not want to assign memory for former scratches.  */
        && ! lra_former_scratch_p (i))
      {
        spill_hard_reg[i] = NULL_RTX;
        pseudo_regnos[n++] = i;
      }
  lra_assert (n > 0);
  pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num);
  slots = XNEWVEC (struct slot, regs_num);
  /* Sort regnos according to their usage frequencies.  */
  qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
  n = assign_spill_hard_regs (pseudo_regnos, n);
  assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n);
  for (i = 0; i < n; i++)
    if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX)
      assign_mem_slot (pseudo_regnos[i]);
  if (n > 0 && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  if (lra_dump_file != NULL)
    {
      for (i = 0; i < slots_num; i++)
        {
          fprintf (lra_dump_file, "  Slot %d regnos (width = %d):", i,
                   GET_MODE_SIZE (GET_MODE (slots[i].mem)));
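          /* Walk the list of pseudos sharing this slot; subtracting
             the PSEUDO_SLOTS array base from the next pointer turns
             it back into a regno.  */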
          for (curr_regno = slots[i].regno;;
               curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
            {
              fprintf (lra_dump_file, " %d", curr_regno);
              if (pseudo_slots[curr_regno].next == NULL)
                break;
            }
          fprintf (lra_dump_file, "\n");
        }
    }
  spill_pseudos ();
  free (slots);
  free (pseudo_slots);
  free (pseudo_regnos);
  free (spill_hard_reg);
}

/* Apply alter_subreg for subregs of regs in *LOC.  Use FINAL_P for
   alter_subreg calls.  Return true if any subreg of reg is
   processed.  */
static bool
alter_subregs (rtx *loc, bool final_p)
{
  int i;
  rtx x = *loc;
  bool res;
  const char *fmt;
  enum rtx_code code;

  if (x == NULL_RTX)
    return false;
  code = GET_CODE (x);
  if (code == SUBREG && REG_P (SUBREG_REG (x)))
    {
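      /* By now every pseudo has been replaced by a hard reg or by
         memory, so a remaining subreg of a reg must wrap a hard
         register.  */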
      lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER);
      alter_subreg (loc, final_p);
      return true;
    }
  fmt = GET_RTX_FORMAT (code);
  res = false;
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (alter_subregs (&XEXP (x, i), final_p))
            res = true;
        }
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (alter_subregs (&XVECEXP (x, i, j), final_p))
              res = true;
        }
    }
  return res;
}

/* Return true if REGNO is used for return in the current
   function.  */
static bool
return_regno_p (unsigned int regno)
{
  rtx outgoing = crtl->return_rtx;

  if (! outgoing)
    return false;

  if (REG_P (outgoing))
    return REGNO (outgoing) == regno;
  else if (GET_CODE (outgoing) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (outgoing, 0); i++)
        {
          rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);

          if (REG_P (x) && REGNO (x) == regno)
            return true;
        }
    }
  return false;
}

/* Final change of pseudos that got hard registers into those hard
   registers, and removal of the temporary clobbers.  */
void
lra_final_code_change (void)
{
  int i, hard_regno;
  basic_block bb;
  rtx_insn *insn, *curr;
  int max_regno = max_reg_num ();

  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0
        && (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      SET_REGNO (regno_reg_rtx[i], hard_regno);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_SAFE (bb, insn, curr)
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat))
            {
              /* Remove clobbers temporarily created in LRA.  We don't
                 need them anymore and don't want to waste compiler
                 time processing them in a few subsequent passes.  */
              lra_invalidate_insn_data (insn);
              delete_insn (insn);
              continue;
            }

          /* IRA can generate move insns involving pseudos.  It is
             better to remove them earlier to speed up the compiler a
             bit.  It is also better to do it here as they might not
             pass the final RTL check in LRA (e.g. an insn moving a
             control register into itself).  So remove a useless move
             insn unless the next insn is a USE marking the return reg
             (we should save this as some subsequent optimizations
             assume that such original insns are saved).  */
          if (NONJUMP_INSN_P (insn) && GET_CODE (pat) == SET
              && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat))
              && REGNO (SET_SRC (pat)) == REGNO (SET_DEST (pat))
              && ! return_regno_p (REGNO (SET_SRC (pat))))
            {
              lra_invalidate_insn_data (insn);
              delete_insn (insn);
              continue;
            }

          lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
          struct lra_static_insn_data *static_id = id->insn_static_data;
          bool insn_change_p = false;

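          /* With hard regnos in place, simplify any remaining subregs
             in the operands (skipping operator operands of non-debug
             insns) into plain hard registers.  */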
          for (i = id->insn_static_data->n_operands - 1; i >= 0; i--)
            if ((DEBUG_INSN_P (insn) || ! static_id->operand[i].is_operator)
                && alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn)))
              {
                lra_update_dup (id, i);
                insn_change_p = true;
              }
          if (insn_change_p)
            lra_update_operator_dups (id);
        }
}