/* Integrated Register Allocator.  Changing code and generating moves.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* When we have more than one region, we need to change the original
   RTL code after coloring.  Consider two allocnos representing the
   same pseudo-register outside and inside a region respectively.
   They can get different hard registers.  The reload pass works on a
   pseudo-register basis and there is no way to tell reload that the
   pseudo could be in different registers, and it is even more
   difficult to say in which places of the code the pseudo should have
   particular hard registers.  So in this case IRA has to create and
   use a new pseudo-register inside the region and add code to move
   allocno values on the region's borders.  This is done by the code
   in this file.

   The code makes a top-down traversal of the regions and generates
   new pseudos and the move code on the region borders.  In some
   complicated cases IRA can create a new pseudo used temporarily to
   move allocno values when a swap of values stored in two
   hard registers is needed (e.g. two allocnos representing different
   pseudos outside a region got hard registers 1 and 2 respectively
   and the corresponding allocnos inside the region got hard registers
   2 and 1 respectively).  At this stage, the new pseudo is marked as
   spilled.

   IRA still creates the pseudo-register and the moves on the region
   borders even when both corresponding allocnos were assigned to the
   same hard register.  This is done because, if the reload pass for
   some reason spills a pseudo-register representing the original
   pseudo outside or inside the region, the effect will be smaller
   because another pseudo will still be in the hard register.  In most
   cases, this is better than spilling the original pseudo over its
   whole live range.  If reload does not change the allocation for the
   two pseudo-registers, the trivial move will be removed by
   post-reload optimizations.

   IRA does not generate a new pseudo and moves for the allocno values
   if both allocnos representing an original pseudo inside and outside
   a region are assigned to the same hard register when the register
   pressure in the region for the corresponding pressure class is less
   than the number of available hard registers for that pressure
   class.

   IRA also does some optimizations to remove redundant moves which
   are transformed into stores by the reload pass on CFG edges
   representing exits from the region.

   IRA tries to reduce duplication of code generated on CFG edges
   which are entries and exits to/from regions by moving some code to
   the edge sources or destinations when it is possible.  */
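
/* For illustration, here is a hypothetical shuffling scenario of the
   kind described above (the pseudo and hard register numbers are made
   up).  Suppose pseudo P1 got hard register 1 outside a region and
   hard register 2 inside it, while pseudo P2 got hard register 2
   outside and hard register 1 inside.  The moves needed on the region
   entry edge are

       hr2 <- hr1   (value of P1)
       hr1 <- hr2   (value of P2)

   and they form a cycle: whichever move is emitted first clobbers the
   source of the other.  IRA breaks such cycles by introducing a
   temporary pseudo T marked as spilled:

       T   <- hr2   (save the value of P2)
       hr2 <- hr1
       hr1 <- T

   The function modify_move_list below detects and breaks these
   cycles.  */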

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "regs.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "flags.h"
#include "obstack.h"
#include "bitmap.h"
#include "hard-reg-set.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "basic-block.h"
#include "symtab.h"
#include "statistics.h"
#include "alias.h"
#include "inchash.h"
#include "tree.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"
#include "params.h"
#include "reload.h"
#include "df.h"
#include "ira-int.h"

/* Data used to emit live range split insns and to flatten the IR.  */
ira_emit_data_t ira_allocno_emit_data;

/* Definitions for vectors of pointers.  */
typedef void *void_p;

/* Pointers to data allocated for allocnos being created during
   emitting.  Usually there are quite few such allocnos because they
   are created only for resolving loops in register shuffling.  */
static vec<void_p> new_allocno_emit_data_vec;

/* Allocate and initiate the emit data.  */
void
ira_initiate_emit_data (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_allocno_emit_data
    = (ira_emit_data_t) ira_allocate (ira_allocnos_num
				      * sizeof (struct ira_emit_data));
  memset (ira_allocno_emit_data, 0,
	  ira_allocnos_num * sizeof (struct ira_emit_data));
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
  new_allocno_emit_data_vec.create (50);
}

/* Free the emit data.  */
void
ira_finish_emit_data (void)
{
  void_p p;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_free (ira_allocno_emit_data);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  while (new_allocno_emit_data_vec.length () != 0)
    {
      p = new_allocno_emit_data_vec.pop ();
      ira_free (p);
    }
  new_allocno_emit_data_vec.release ();
}

/* Create and return a new allocno with given REGNO and
   LOOP_TREE_NODE.  Allocate emit data for it.  */
static ira_allocno_t
create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = ira_create_allocno (regno, false, loop_tree_node);
  ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
  memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
  new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
  return a;
}

/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies.  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which the given move depends.  Dependencies can be
     cyclic.  That means we need a temporary to generate the moves.
     Sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg R1
     and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};

/* Array of moves (indexed by BB index) which should be put at the
   start/end of the corresponding basic blocks.  */
static move_t *at_bb_start, *at_bb_end;

/* Max regno before renaming some pseudo-registers.  For example, the
   same pseudo-register can be renamed in a loop if its allocation is
   different outside the loop.  */
static int max_regno_before_changing;

/* Return a new move of allocnos TO and FROM.  */
static move_t
create_move (ira_allocno_t to, ira_allocno_t from)
{
  move_t move;

  move = (move_t) ira_allocate (sizeof (struct move));
  move->deps = NULL;
  move->deps_num = 0;
  move->to = to;
  move->from = from;
  move->next = NULL;
  move->insn = NULL;
  move->visited_p = false;
  return move;
}

/* Free memory for MOVE and its dependencies.  */
static void
free_move (move_t move)
{
  if (move->deps != NULL)
    ira_free (move->deps);
  ira_free (move);
}

/* Free memory for the list of moves given by its HEAD.  */
static void
free_move_list (move_t head)
{
  move_t next;

  for (; head != NULL; head = next)
    {
      next = head->next;
      free_move (head);
    }
}

/* Return TRUE if the move lists LIST1 and LIST2 are equal (two
   moves are equal if they involve the same allocnos).  */
static bool
eq_move_lists_p (move_t list1, move_t list2)
{
  for (; list1 != NULL && list2 != NULL;
       list1 = list1->next, list2 = list2->next)
    if (list1->from != list2->from || list1->to != list2->to)
      return false;
  return list1 == list2;
}

/* Print move list LIST into file F.  */
static void
print_move_list (FILE *f, move_t list)
{
  for (; list != NULL; list = list->next)
    fprintf (f, " a%dr%d->a%dr%d",
	     ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
	     ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
  fprintf (f, "\n");
}

extern void ira_debug_move_list (move_t list);

/* Print move list LIST into stderr.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}

/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was done.  */
static bool
change_regs (rtx *loc)
{
  int i, regno;
  bool result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      if (regno < FIRST_PSEUDO_REGISTER)
	return false;
      if (regno >= max_regno_before_changing)
	/* It is a shared register which was changed already.  */
	return false;
      if (ira_curr_regno_allocno_map[regno] == NULL)
	return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
	return false;
      *loc = reg;
      return true;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    result = change_regs (&XVECEXP (*loc, i, j)) || result;
	}
    }
  return result;
}

/* Change pseudo-registers, if necessary, in insn *INSN_PTR.  Return
   TRUE if any were changed.  */
static bool
change_regs_in_insn (rtx_insn **insn_ptr)
{
  rtx x = *insn_ptr;
  bool result = change_regs (&x);
  *insn_ptr = as_a <rtx_insn *> (x);
  return result;
}

/* Attach MOVE to the edge E.  The move is attached to the head of the
   list if HEAD_P is TRUE.  */
static void
add_to_edge_list (edge e, move_t move, bool head_p)
{
  move_t last;

  if (head_p || e->aux == NULL)
    {
      move->next = (move_t) e->aux;
      e->aux = move;
    }
  else
    {
      for (last = (move_t) e->aux; last->next != NULL; last = last->next)
	;
      last->next = move;
      move->next = NULL;
    }
}

/* Create and return a new pseudo-register with the same attributes as
   ORIGINAL_REG.  */
rtx
ira_create_new_reg (rtx original_reg)
{
  rtx new_reg;

  new_reg = gen_reg_rtx (GET_MODE (original_reg));
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "  Creating newreg=%i from oldreg=%i\n",
	     REGNO (new_reg), REGNO (original_reg));
  ira_expand_reg_equiv ();
  return new_reg;
}

/* Return TRUE if the loop given by SUBNODE is inside the loop given
   by NODE.  */
static bool
subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
{
  for (; subnode != NULL; subnode = subnode->parent)
    if (subnode == node)
      return true;
  return false;
}

/* Set up member `reg' to REG for allocnos which have the same regno
   as ALLOCNO and which are inside the loop corresponding to ALLOCNO.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
	{
	  node = node->parent;
	  if (node == NULL)
	    break;
	  a = node->regno_allocno_map[regno];
	}
      if (a == NULL)
	continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
	break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}

/* Return true if there is an entry to the given loop not from its
   parent (or grandparent) block.  For example, this is possible for
   two adjacent loops inside another loop.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
	FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
	  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
	    {
	      for (parent = src_loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (parent == loop_node)
		  break;
	      if (parent != NULL)
		/* That is an exit from a nested loop -- skip it.  */
		continue;
	      for (parent = loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (src_loop_node == parent)
		  break;
	      if (parent == NULL)
		return true;
	    }
      }
  return false;
}
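
/* For illustration, a hypothetical CFG for which the function above
   returns true: an outer loop contains two adjacent inner loops L1
   and L2, and an edge goes from a block of L1 straight to the header
   of L2.  For L2 the source of that edge belongs to L1, which is
   neither L2 itself, nor a loop nested in L2, nor an ancestor of L2
   in the loop tree, so L2 is entered from a non-parent and the
   function returns true.  */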

/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
static void
setup_entered_from_non_parent_p (void)
{
  unsigned int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      ira_loop_nodes[i].entered_from_non_parent_p
	= entered_from_non_parent_p (&ira_loop_nodes[i]);
}

/* Return TRUE if a move of SRC_ALLOCNO (assigned to a hard register)
   to DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change the value of the destination.  One possible reason for
   this is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
	      && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
	/* We reached the destination and everything is OK.  */
	return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
	return false;
      else if (node->entered_from_non_parent_p)
	/* If there is a path from a destination loop block to the
	   source loop header containing basic blocks of non-parents
	   (grandparents) of the source loop, we should have checked
	   modifications of the pseudo on this path too to decide
	   whether the store can be removed.  That could be done by
	   solving a data-flow problem.  Unfortunately such a global
	   solution would complicate IR flattening.  Therefore we just
	   prohibit removal of the store in such a complicated case.  */
	return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}
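
/* For illustration, a hypothetical case where the store is removable:
   pseudo 65 lives in memory in the parent region and in a hard
   register inside a loop, and neither the loop nor any enclosing
   region up to the one where the pseudo already has the memory
   location modifies pseudo 65.  The store at the loop exit would then
   write back the very value memory already holds, so
   generate_edge_moves records the fact in the mem_optimized_dest
   fields instead of creating the move.  */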

/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
			     FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
	src_allocno = src_map[regno];
	dest_allocno = dest_map[regno];
	if (REGNO (allocno_emit_reg (src_allocno))
	    == REGNO (allocno_emit_reg (dest_allocno)))
	  continue;
	/* Remove unnecessary stores at the region exit.  We should do
	   this for readonly memory for sure, and this is guaranteed
	   by the fact that we never generate moves on region borders
	   (see the checking in function change_loop).  */
	if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
	    && ALLOCNO_HARD_REGNO (src_allocno) >= 0
	    && store_can_be_removed_p (src_allocno, dest_allocno))
	  {
	    ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
	    ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
	    if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	      fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
		       regno, ALLOCNO_NUM (src_allocno),
		       ALLOCNO_NUM (dest_allocno));
	    continue;
	  }
	move = create_move (dest_allocno, src_allocno);
	add_to_edge_list (e, move, true);
      }
}

/* Bitmap of allocnos local for the current loop.  */
static bitmap local_allocno_bitmap;

/* This bitmap is used to find that we need to generate and use a new
   pseudo-register when processing allocnos with the same original
   regno.  */
static bitmap used_regno_bitmap;

/* This bitmap contains regnos of allocnos which were renamed locally
   because the allocnos correspond to disjoint live ranges in loops
   with a common parent.  */
static bitmap renamed_regno_bitmap;

/* Change (if necessary) pseudo-registers inside the loop given by
   loop tree node NODE.  */
static void
change_loop (ira_loop_tree_node_t node)
{
  bitmap_iterator bi;
  unsigned int i;
  int regno;
  bool used_p;
  ira_allocno_t allocno, parent_allocno, *map;
  rtx_insn *insn;
  rtx original_reg;
  enum reg_class aclass, pclass;
  ira_loop_tree_node_t parent;

  if (node != ira_loop_tree_root)
    {
      ira_assert (current_loops != NULL);

      if (node->bb != NULL)
	{
	  FOR_BB_INSNS (node->bb, insn)
	    if (INSN_P (insn) && change_regs_in_insn (&insn))
	      {
		df_insn_rescan (insn);
		df_notes_rescan (insn);
	      }
	  return;
	}

      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 "      Changing RTL for loop %d (header bb%d)\n",
		 node->loop_num, node->loop->header->index);

      parent = ira_curr_loop_tree_node->parent;
      map = parent->regno_allocno_map;
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
				 0, i, bi)
	{
	  allocno = ira_allocnos[i];
	  regno = ALLOCNO_REGNO (allocno);
	  aclass = ALLOCNO_CLASS (allocno);
	  pclass = ira_pressure_class_translate[aclass];
	  parent_allocno = map[regno];
	  ira_assert (regno < ira_reg_equiv_len);
	  /* We generate a move even for the same hard register
	     because the reload pass can put an allocno into memory;
	     in this case we will have live range splitting.  If that
	     does not happen, such same-hard-register moves will be
	     removed.  The worst case, when both allocnos are put into
	     memory by reload, is very rare.  */
	  if (parent_allocno != NULL
	      && (ALLOCNO_HARD_REGNO (allocno)
		  == ALLOCNO_HARD_REGNO (parent_allocno))
	      && (ALLOCNO_HARD_REGNO (allocno) < 0
		  || (parent->reg_pressure[pclass] + 1
		      <= ira_class_hard_regs_num[pclass])
		  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
					[ALLOCNO_MODE (allocno)],
					ALLOCNO_HARD_REGNO (allocno))
		  /* don't create copies because reload can spill an
		     allocno set by copy although the allocno will not
		     get a memory slot.  */
		  || ira_equiv_no_lvalue_p (regno)
		  || (pic_offset_table_rtx != NULL
		      && (ALLOCNO_REGNO (allocno)
			  == (int) REGNO (pic_offset_table_rtx)))))
	    continue;
	  original_reg = allocno_emit_reg (allocno);
	  if (parent_allocno == NULL
	      || (REGNO (allocno_emit_reg (parent_allocno))
		  == REGNO (original_reg)))
	    {
	      if (internal_flag_ira_verbose > 3 && ira_dump_file)
		fprintf (ira_dump_file, "  %i vs parent %i:",
			 ALLOCNO_HARD_REGNO (allocno),
			 ALLOCNO_HARD_REGNO (parent_allocno));
	      set_allocno_reg (allocno, ira_create_new_reg (original_reg));
	    }
	}
    }
  /* Rename locals: Local allocnos with the same regno in different
     loops might get different hard registers.  So we need to change
     ALLOCNO_REG.  */
  bitmap_and_compl (local_allocno_bitmap,
		    ira_curr_loop_tree_node->all_allocnos,
		    ira_curr_loop_tree_node->border_allocnos);
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
    {
      allocno = ira_allocnos[i];
      regno = ALLOCNO_REGNO (allocno);
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
	continue;
      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
      if (! used_p)
	continue;
      bitmap_set_bit (renamed_regno_bitmap, regno);
      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
    }
}

/* Process all allocnos to set up the flag somewhere_renamed_p.  */
static void
set_allocno_somewhere_renamed_p (void)
{
  unsigned int regno;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (allocno, ai)
    {
      regno = ALLOCNO_REGNO (allocno);
      if (bitmap_bit_p (renamed_regno_bitmap, regno)
	  && REGNO (allocno_emit_reg (allocno)) == regno)
	ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
    }
}

/* Return TRUE if the move lists on all edges given in vector VEC are
   equal.  */
static bool
eq_edge_move_lists_p (vec<edge, va_gc> *vec)
{
  move_t list;
  int i;

  list = (move_t) EDGE_I (vec, 0)->aux;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
      return false;
  return true;
}

/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put move lists at the BB start or end if it is possible.  In
   other words, this decreases code duplication of allocno moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}

/* Last move (in the move sequence being processed) setting up the
   corresponding hard register.  */
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];

/* If the element value is equal to CURR_TICK then the corresponding
   element in `hard_regno_last_set' is defined and correct.  */
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];

/* Last move (in the move sequence being processed) setting up the
   corresponding allocno.  */
static move_t *allocno_last_set;

/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
static int *allocno_last_set_check;

/* Definition of vector of moves.  */

/* This vec contains moves sorted topologically (depth-first) on their
   dependency graph.  */
static vec<move_t> move_vec;

/* The variable value is used to check correctness of values of
   elements of arrays `hard_regno_last_set' and
   `allocno_last_set_check'.  */
static int curr_tick;

/* This recursive function traverses dependencies of MOVE and produces
   a topological sorting (in depth-first order).  */
static void
traverse_moves (move_t move)
{
  int i;

  if (move->visited_p)
    return;
  move->visited_p = true;
  for (i = move->deps_num - 1; i >= 0; i--)
    traverse_moves (move->deps[i]);
  move_vec.safe_push (move);
}

/* Remove unnecessary moves in LIST, make a topological sorting, and
   remove cycles on hard reg dependencies by introducing new allocnos
   assigned to memory and additional moves.  Return the resulting move
   list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      n++;
	  move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      move->deps[n++] = hard_regno_last_set[hard_regno + i];
	  move->deps_num = n;
	}
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
	last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles:  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& ALLOCNO_HARD_REGNO
		   (hard_regno_last_set[hard_regno + i]->to) >= 0)
	      {
		int n, j;
		ira_allocno_t new_allocno;

		set_move = hard_regno_last_set[hard_regno + i];
		/* It does not matter what loop_tree_node (of TO or
		   FROM) to use for the new allocno because of
		   subsequent IRA internal representation
		   flattening.  */
		new_allocno
		  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
					ALLOCNO_LOOP_TREE_NODE (set_move->to));
		ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
		ira_set_allocno_class (new_allocno,
				       ALLOCNO_CLASS (set_move->to));
		ira_create_allocno_objects (new_allocno);
		ALLOCNO_ASSIGNED_P (new_allocno) = true;
		ALLOCNO_HARD_REGNO (new_allocno) = -1;
		ALLOCNO_EMIT_DATA (new_allocno)->reg
		  = ira_create_new_reg (allocno_emit_reg (set_move->to));

		/* Make it possibly conflicting with all earlier
		   created allocnos.  Cases where temporary allocnos
		   are created to remove the cycles are quite rare.  */
		n = ALLOCNO_NUM_OBJECTS (new_allocno);
		gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
		for (j = 0; j < n; j++)
		  {
		    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

		    OBJECT_MIN (new_obj) = 0;
		    OBJECT_MAX (new_obj) = ira_objects_num - 1;
		  }

		new_move = create_move (set_move->to, new_allocno);
		set_move->to = new_allocno;
		move_vec.safe_push (new_move);
		ira_move_loops_num++;
		if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		  fprintf (ira_dump_file,
			   "    Creating temporary allocno a%dr%d\n",
			   ALLOCNO_NUM (new_allocno),
			   REGNO (allocno_emit_reg (new_allocno)));
	      }
	}
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}
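
/* A worked example for modify_move_list (with hypothetical allocnos
   and hard registers).  Given the move list

       a1(hr1)->a2(hr2),  a3(hr2)->a4(hr3)

   the second move depends on the first one: it must read hr2 before
   the first move overwrites it, so the topological sort emits a3->a4
   before a1->a2.  If instead the list were

       a1(hr1)->a2(hr2),  a3(hr2)->a4(hr1)

   the dependencies would be cyclic, and one possible result of the
   cycle removal above is

       a1(hr1)->a5(mem),  a3(hr2)->a4(hr1),  a5(mem)->a2(hr2)

   where a5 is a new memory-assigned allocno holding the original
   value of hr1 until hr2 has been read.  */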

/* Generate RTX move insns from the move list LIST.  This updates
   allocation cost using move execution frequency FREQ.  */
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
	{
	  /* The reload pass needs to have set up insn codes.  If it
	     sets up insn codes by itself, it may fail because insns
	     will have hard registers instead of pseudos and there may
	     be no machine insn with the given hard registers.  */
	  recog_memoized (insn);
	  /* Add insn to equiv init insn list if it is necessary.
	     Otherwise reload will not remove this insn if it decides
	     to use the equivalence.  */
	  if ((set = single_set (insn)) != NULL_RTX)
	    {
	      dest = SET_DEST (set);
	      if (GET_CODE (dest) == SUBREG)
		dest = SUBREG_REG (dest);
	      ira_assert (REG_P (dest));
	      regno = REGNO (dest);
	      if (regno >= ira_reg_equiv_len
		  || (ira_reg_equiv[regno].invariant == NULL_RTX
		      && ira_reg_equiv[regno].constant == NULL_RTX))
		continue; /* regno has no equivalence.  */
	      ira_assert ((int) reg_equivs->length () > regno);
	      reg_equiv_init (regno)
		= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
	    }
	}
      if (ira_use_lra_p)
	ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->from) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_store_cost += cost;
	    }
	}
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->to) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_load_cost += cost;
	    }
	}
      else
	{
	  ira_init_register_move_cost_if_necessary (mode);
	  cost = ira_register_move_cost[mode][aclass][aclass] * freq;
	  ira_shuffle_cost += cost;
	}
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}
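
/* A note on the cost accounting above (the numbers are illustrative).
   Each emitted move is classified by the allocation of its two
   allocnos: destination in memory and source in a hard register
   counts as a store, source in memory as a load, and both in hard
   registers as a shuffle, each weighted by the execution frequency
   FREQ.  For example, a register-to-register move with FREQ == 1000
   adds ira_register_move_cost[mode][aclass][aclass] * 1000 to both
   ira_shuffle_cost and ira_overall_cost.  */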

/* Generate RTX move insns from move lists attached to basic blocks
   and edges.  */
static void
emit_moves (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  rtx_insn *insns, *tmp;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (at_bb_start[bb->index] != NULL)
	{
	  at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
	  insns = emit_move_list (at_bb_start[bb->index],
				  REG_FREQ_FROM_BB (bb));
	  tmp = BB_HEAD (bb);
	  if (LABEL_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  if (NOTE_INSN_BASIC_BLOCK_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  if (tmp == BB_HEAD (bb))
	    emit_insn_before (insns, tmp);
	  else if (tmp != NULL_RTX)
	    emit_insn_after (insns, PREV_INSN (tmp));
	  else
	    emit_insn_after (insns, get_last_insn ());
	}

      if (at_bb_end[bb->index] != NULL)
	{
	  at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
	  insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
	  ira_assert (! control_flow_insn_p (BB_END (bb)));
	  emit_insn_after (insns, BB_END (bb));
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux == NULL)
	    continue;
	  ira_assert ((e->flags & EDGE_ABNORMAL) == 0
		      || ! EDGE_CRITICAL_P (e));
	  e->aux = modify_move_list ((move_t) e->aux);
	  insert_insn_on_edge
	    (emit_move_list ((move_t) e->aux,
			     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
	     e);
	  if (e->src->next_bb != e->dest)
	    ira_additional_jumps_num++;
	}
    }
}

/* Update the costs of A and the corresponding allocnos on upper
   levels of the loop tree from reading (if READ_P) or writing A on an
   execution path with frequency FREQ.  */
static void
update_costs (ira_allocno_t a, bool read_p, int freq)
{
  ira_loop_tree_node_t parent;

  for (;;)
    {
      ALLOCNO_NREFS (a)++;
      ALLOCNO_FREQ (a) += freq;
      ALLOCNO_MEMORY_COST (a)
	+= (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
	    [read_p ? 1 : 0] * freq);
      if (ALLOCNO_CAP (a) != NULL)
	a = ALLOCNO_CAP (a);
      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
	       || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
	break;
    }
}

/* Process moves from LIST with execution FREQ to add ranges, copies,
   and modify costs for allocnos involved in the moves.  All regnos
   living through the list are in LIVE_THROUGH, and the loop tree node
   used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
				     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges are not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
	    {
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
			 ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
	      ira_allocate_object_conflicts (to_obj, n);
	    }
	}
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
		 cp->num, ALLOCNO_NUM (cp->first),
		 REGNO (allocno_emit_reg (cp->first)),
		 ALLOCNO_NUM (cp->second),
		 REGNO (allocno_emit_reg (cp->second)));

      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
	  r = OBJECT_LIVE_RANGES (from_obj);
	  if (r == NULL || r->finish >= 0)
	    {
	      ira_add_live_range_to_object (from_obj, start, ira_max_point);
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	  else
	    {
	      r->finish = ira_max_point;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	}
      ira_max_point++;
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  ira_add_live_range_to_object (to_obj, ira_max_point, -1);
	}
      ira_max_point++;
    }
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
	  r = OBJECT_LIVE_RANGES (to_obj);
	  if (r->finish < 0)
	    {
	      r->finish = ira_max_point - 1;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, r->finish, ALLOCNO_NUM (move->to),
			 REGNO (allocno_emit_reg (move->to)));
	    }
	}
    }
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
	a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_add_live_range_to_object (obj, start, ira_max_point - 1);
	}
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "    Adding range [%d..%d] to live through %s allocno a%dr%d\n",
	   start, ira_max_point - 1,
	   to != NULL ? "upper level" : "",
	   ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}

/* Process all move lists to add ranges, conflicts, copies, and modify
   costs for allocnos involved in the moves.  */
static void
add_ranges_and_copies (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  ira_loop_tree_node_t node;
  bitmap live_through;

  live_through = ira_allocate_bitmap ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* It does not matter what loop_tree_node (of the source or
	 destination block) to use for searching allocnos by their
	 regnos because of subsequent IR flattening.  */
      node = IRA_BB_NODE (bb)->parent;
      bitmap_copy (live_through, df_get_live_in (bb));
      add_range_and_copies_from_move_list
	(at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      bitmap_copy (live_through, df_get_live_out (bb));
      add_range_and_copies_from_move_list
	(at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  bitmap_and (live_through,
		      df_get_live_in (e->dest), df_get_live_out (bb));
	  add_range_and_copies_from_move_list
	    ((move_t) e->aux, node, live_through,
	     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
	}
    }
  ira_free_bitmap (live_through);
}

/* The entry function changes code and generates shuffling allocnos on
   region borders for the regional (LOOPS_P is TRUE in this case)
   register allocation.  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  if (! loops_p)
    return;
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	  generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up:  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  free_move_list ((move_t) e->aux);
	  e->aux = NULL;
	}
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do this before reload because
     reload assumes initial insn codes are defined.  The insn codes
     can be invalidated by the CFG infrastructure, for example in jump
     redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
	recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}