/* Integrated Register Allocator.  Changing code and generating moves.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* When we have more than one region, we need to change the original
   RTL code after coloring.  Consider two allocnos representing the
   same pseudo-register outside and inside a region respectively.
   They can get different hard registers.  The reload pass works on a
   pseudo-register basis: there is no way to tell reload that a
   pseudo could be in different registers, and it is even more
   difficult to say in what places of the code the pseudo should have
   particular hard registers.  So in this case IRA has to create and
   use a new pseudo-register inside the region and to add code to move
   allocno values on the region's borders.  This is done by the code
   in this file.

   The code makes a top-down traversal of the regions and generates
   new pseudos and the move code on the region borders.  In some
   complicated cases IRA can create a new pseudo used temporarily to
   move allocno values when a swap of values stored in two
   hard registers is needed (e.g. two allocnos representing different
   pseudos outside a region got hard registers 1 and 2 respectively
   and the corresponding allocnos inside the region got hard registers
   2 and 1 respectively).  At this stage, the new pseudo is marked as
   spilled.

   IRA still creates the pseudo-register and the moves on the region
   borders even when both corresponding allocnos were assigned to the
   same hard register.  This is done because, if the reload pass for
   some reason spills a pseudo-register representing the original
   pseudo outside or inside the region, the effect will be smaller
   because another pseudo will still be in the hard register.  In most
   cases, this is better than spilling the original pseudo over its
   whole live range.  If reload does not change the allocation of the
   two pseudo-registers, the trivial move will be removed by
   post-reload optimizations.

   IRA does not generate a new pseudo and moves for the allocno values
   if both allocnos representing an original pseudo inside and outside
   a region are assigned to the same hard register when the register
   pressure in the region for the corresponding pressure class is less
   than the number of available hard registers for that pressure
   class.

   IRA also does some optimizations to remove redundant moves which
   are transformed into stores by the reload pass on CFG edges
   representing exits from the region.

   IRA tries to reduce duplication of code generated on CFG edges
   which enter or exit regions by moving some code to the edge sources
   or destinations when it is possible.  */
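
/* For example (an illustrative sketch of the scheme above, not an
   exhaustive description): if pseudo p is assigned hard register 1
   outside a loop and hard register 2 inside it, a new pseudo p' is
   created for the loop body and the moves "p' <- p" and "p <- p'"
   are emitted on the loop entry and exit edges correspondingly.  */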

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "regs.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "flags.h"
#include "obstack.h"
#include "bitmap.h"
#include "hard-reg-set.h"
#include "predict.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "basic-block.h"
#include "symtab.h"
#include "alias.h"
#include "tree.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"
#include "params.h"
#include "reload.h"
#include "df.h"
#include "ira-int.h"

/* Data used to emit live range split insns and to flatten the IR.  */
ira_emit_data_t ira_allocno_emit_data;

/* Definitions for vectors of pointers.  */
typedef void *void_p;

/* Pointers to data allocated for allocnos being created during
   emitting.  Usually there are very few such allocnos because they
   are created only for resolving loops in register shuffling.  */
static vec<void_p> new_allocno_emit_data_vec;

/* Allocate and initiate the emit data.  */
void
ira_initiate_emit_data (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_allocno_emit_data
    = (ira_emit_data_t) ira_allocate (ira_allocnos_num
                                      * sizeof (struct ira_emit_data));
  memset (ira_allocno_emit_data, 0,
          ira_allocnos_num * sizeof (struct ira_emit_data));
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
  new_allocno_emit_data_vec.create (50);
}

/* Free the emit data.  */
void
ira_finish_emit_data (void)
{
  void_p p;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_free (ira_allocno_emit_data);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  while (new_allocno_emit_data_vec.length () != 0)
    {
      p = new_allocno_emit_data_vec.pop ();
      ira_free (p);
    }
  new_allocno_emit_data_vec.release ();
}

/* Create and return a new allocno with given REGNO and
   LOOP_TREE_NODE.  Allocate emit data for it.  */
static ira_allocno_t
create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = ira_create_allocno (regno, false, loop_tree_node);
  ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
  memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
  new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
  return a;
}

/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocations.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies.  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which the given move depends.  Dependencies can be
     cyclic; in that case we need a temporary to generate the moves.
     The sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg
     R1 and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};

/* Array of moves (indexed by BB index) which should be put at the
   start/end of the corresponding basic blocks.  */
static move_t *at_bb_start, *at_bb_end;

/* Max regno before renaming some pseudo-registers.  For example, the
   same pseudo-register can be renamed in a loop if its allocation is
   different outside the loop.  */
static int max_regno_before_changing;

/* Return a new move of allocnos TO and FROM.  */
static move_t
create_move (ira_allocno_t to, ira_allocno_t from)
{
  move_t move;

  move = (move_t) ira_allocate (sizeof (struct move));
  move->deps = NULL;
  move->deps_num = 0;
  move->to = to;
  move->from = from;
  move->next = NULL;
  move->insn = NULL;
  move->visited_p = false;
  return move;
}

/* Free memory for MOVE and its dependencies.  */
static void
free_move (move_t move)
{
  if (move->deps != NULL)
    ira_free (move->deps);
  ira_free (move);
}

/* Free memory for the list of moves given by its HEAD.  */
static void
free_move_list (move_t head)
{
  move_t next;

  for (; head != NULL; head = next)
    {
      next = head->next;
      free_move (head);
    }
}

/* Return TRUE if the move lists LIST1 and LIST2 are equal (two moves
   are equal if they involve the same allocnos).  */
static bool
eq_move_lists_p (move_t list1, move_t list2)
{
  for (; list1 != NULL && list2 != NULL;
       list1 = list1->next, list2 = list2->next)
    if (list1->from != list2->from || list1->to != list2->to)
      return false;
  return list1 == list2;
}

/* Print move list LIST into file F.  */
static void
print_move_list (FILE *f, move_t list)
{
  for (; list != NULL; list = list->next)
    fprintf (f, " a%dr%d->a%dr%d",
             ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
             ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
  fprintf (f, "\n");
}

extern void ira_debug_move_list (move_t list);

/* Print move list LIST to stderr.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}

/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was made.  */
static bool
change_regs (rtx *loc)
{
  int i, regno, result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      if (regno < FIRST_PSEUDO_REGISTER)
        return false;
      if (regno >= max_regno_before_changing)
        /* It is a shared register which was changed already.  */
        return false;
      if (ira_curr_regno_allocno_map[regno] == NULL)
        return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
        return false;
      *loc = reg;
      return true;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
            result = change_regs (&XVECEXP (*loc, i, j)) || result;
        }
    }
  return result;
}

static bool
change_regs_in_insn (rtx_insn **insn_ptr)
{
  rtx rtx = *insn_ptr;
  bool result = change_regs (&rtx);
  *insn_ptr = as_a <rtx_insn *> (rtx);
  return result;
}

/* Attach MOVE to the edge E.  The move is attached to the head of the
   list if HEAD_P is TRUE.  */
static void
add_to_edge_list (edge e, move_t move, bool head_p)
{
  move_t last;

  if (head_p || e->aux == NULL)
    {
      move->next = (move_t) e->aux;
      e->aux = move;
    }
  else
    {
      for (last = (move_t) e->aux; last->next != NULL; last = last->next)
        ;
      last->next = move;
      move->next = NULL;
    }
}

/* Create and return a new pseudo-register with the same attributes as
   ORIGINAL_REG.  */
rtx
ira_create_new_reg (rtx original_reg)
{
  rtx new_reg;

  new_reg = gen_reg_rtx (GET_MODE (original_reg));
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "      Creating newreg=%i from oldreg=%i\n",
             REGNO (new_reg), REGNO (original_reg));
  ira_expand_reg_equiv ();
  return new_reg;
}

/* Return TRUE if the loop given by SUBNODE is inside the loop given
   by NODE.  */
static bool
subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
{
  for (; subnode != NULL; subnode = subnode->parent)
    if (subnode == node)
      return true;
  return false;
}

/* Set up member `reg' to REG for allocnos which have the same regno
   as ALLOCNO and which are inside the loop corresponding to
   ALLOCNO.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
        {
          node = node->parent;
          if (node == NULL)
            break;
          a = node->regno_allocno_map[regno];
        }
      if (a == NULL)
        continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
        break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}

/* Return true if there is an entry to the given loop not from its
   parent (or grandparent) block.  For example, this is possible for
   two adjacent loops inside another loop.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
        FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
          if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
              && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
            {
              for (parent = src_loop_node->parent;
                   parent != NULL;
                   parent = parent->parent)
                if (parent == loop_node)
                  break;
              if (parent != NULL)
                /* That is an exit from a nested loop -- skip it.  */
                continue;
              for (parent = loop_node->parent;
                   parent != NULL;
                   parent = parent->parent)
                if (src_loop_node == parent)
                  break;
              if (parent == NULL)
                return true;
            }
      }
  return false;
}

/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
static void
setup_entered_from_non_parent_p (void)
{
  unsigned int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      ira_loop_nodes[i].entered_from_non_parent_p
        = entered_from_non_parent_p (&ira_loop_nodes[i]);
}

/* Return TRUE if a move of SRC_ALLOCNO (assigned to a hard register)
   to DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change the value of the destination.  One possible reason for
   this is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
              && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
        /* We reached the destination and everything is ok.  */
        return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
        return false;
      else if (node->entered_from_non_parent_p)
        /* If there is a path from a destination loop block to the
           source loop header containing basic blocks of non-parents
           (grandparents) of the source loop, we should have checked
           modifications of the pseudo on this path too to decide
           whether the store can be removed.  It could be done by
           solving a data-flow problem.  Unfortunately such a global
           solution would complicate IR flattening.  Therefore we just
           prohibit removal of the store in such a complicated
           case.  */
        return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}

/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
                             FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
        src_allocno = src_map[regno];
        dest_allocno = dest_map[regno];
        if (REGNO (allocno_emit_reg (src_allocno))
            == REGNO (allocno_emit_reg (dest_allocno)))
          continue;
        /* Remove unnecessary stores at the region exit.  We should do
           this for read-only memory for sure, and this is guaranteed
           by the fact that we never generate moves on region borders
           in that case (see the check in function change_loop).  */
        if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
            && ALLOCNO_HARD_REGNO (src_allocno) >= 0
            && store_can_be_removed_p (src_allocno, dest_allocno))
          {
            ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
            ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
            if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
              fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
                       regno, ALLOCNO_NUM (src_allocno),
                       ALLOCNO_NUM (dest_allocno));
            continue;
          }
        move = create_move (dest_allocno, src_allocno);
        add_to_edge_list (e, move, true);
      }
}

/* Bitmap of allocnos local for the current loop.  */
static bitmap local_allocno_bitmap;

/* This bitmap is used to find out that we need to generate and use a
   new pseudo-register when processing allocnos with the same original
   regno.  */
static bitmap used_regno_bitmap;

/* This bitmap contains regnos of allocnos which were renamed locally
   because the allocnos correspond to disjoint live ranges in loops
   with a common parent.  */
static bitmap renamed_regno_bitmap;
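
/* As an illustration of the renaming done below in change_loop: if a
   pseudo p has disjoint live ranges in two sibling loops, the allocno
   in the loop processed first keeps p, while the allocno in the loop
   processed second gets a fresh pseudo created by ira_create_new_reg;
   p's regno is then recorded in renamed_regno_bitmap.  */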

/* Change (if necessary) pseudo-registers inside the loop given by
   loop tree node NODE.  */
static void
change_loop (ira_loop_tree_node_t node)
{
  bitmap_iterator bi;
  unsigned int i;
  int regno;
  bool used_p;
  ira_allocno_t allocno, parent_allocno, *map;
  rtx_insn *insn;
  rtx original_reg;
  enum reg_class aclass, pclass;
  ira_loop_tree_node_t parent;

  if (node != ira_loop_tree_root)
    {
      ira_assert (current_loops != NULL);

      if (node->bb != NULL)
        {
          FOR_BB_INSNS (node->bb, insn)
            if (INSN_P (insn) && change_regs_in_insn (&insn))
              {
                df_insn_rescan (insn);
                df_notes_rescan (insn);
              }
          return;
        }

      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        fprintf (ira_dump_file,
                 "      Changing RTL for loop %d (header bb%d)\n",
                 node->loop_num, node->loop->header->index);

      parent = ira_curr_loop_tree_node->parent;
      map = parent->regno_allocno_map;
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
                                 0, i, bi)
        {
          allocno = ira_allocnos[i];
          regno = ALLOCNO_REGNO (allocno);
          aclass = ALLOCNO_CLASS (allocno);
          pclass = ira_pressure_class_translate[aclass];
          parent_allocno = map[regno];
          ira_assert (regno < ira_reg_equiv_len);
          /* We generate the move even when both allocnos got the same
             hard register because the reload pass can put an allocno
             into memory; in this case we will have live range
             splitting.  If that does not happen, such moves will be
             removed.  The worst case, when both allocnos are put into
             memory by reload, is very rare.  */
          if (parent_allocno != NULL
              && (ALLOCNO_HARD_REGNO (allocno)
                  == ALLOCNO_HARD_REGNO (parent_allocno))
              && (ALLOCNO_HARD_REGNO (allocno) < 0
                  || (parent->reg_pressure[pclass] + 1
                      <= ira_class_hard_regs_num[pclass])
                  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
                                        [ALLOCNO_MODE (allocno)],
                                        ALLOCNO_HARD_REGNO (allocno))
                  /* Don't create copies because reload can spill an
                     allocno set by a copy even though the allocno
                     will not get a memory slot.  */
                  || ira_equiv_no_lvalue_p (regno)
                  || (pic_offset_table_rtx != NULL
                      && (ALLOCNO_REGNO (allocno)
                          == (int) REGNO (pic_offset_table_rtx)))))
            continue;
          original_reg = allocno_emit_reg (allocno);
          if (parent_allocno == NULL
              || (REGNO (allocno_emit_reg (parent_allocno))
                  == REGNO (original_reg)))
            {
              if (internal_flag_ira_verbose > 3 && ira_dump_file)
                fprintf (ira_dump_file, "  %i vs parent %i:",
                         ALLOCNO_HARD_REGNO (allocno),
                         ALLOCNO_HARD_REGNO (parent_allocno));
              set_allocno_reg (allocno, ira_create_new_reg (original_reg));
            }
        }
    }
  /* Rename locals: local allocnos with the same regno in different
     loops might get different hard registers.  So we need to change
     ALLOCNO_REG.  */
  bitmap_and_compl (local_allocno_bitmap,
                    ira_curr_loop_tree_node->all_allocnos,
                    ira_curr_loop_tree_node->border_allocnos);
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
    {
      allocno = ira_allocnos[i];
      regno = ALLOCNO_REGNO (allocno);
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
        continue;
      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
      if (! used_p)
        continue;
      bitmap_set_bit (renamed_regno_bitmap, regno);
      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
    }
}

/* Pass over all allocnos to set up the flag somewhere_renamed_p.  */
static void
set_allocno_somewhere_renamed_p (void)
{
  unsigned int regno;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (allocno, ai)
    {
      regno = ALLOCNO_REGNO (allocno);
      if (bitmap_bit_p (renamed_regno_bitmap, regno)
          && REGNO (allocno_emit_reg (allocno)) == regno)
        ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
    }
}

/* Return TRUE if the move lists on all edges given in vector VEC are
   equal.  */
static bool
eq_edge_move_lists_p (vec<edge, va_gc> *vec)
{
  move_t list;
  int i;

  list = (move_t) EDGE_I (vec, 0)->aux;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
      return false;
  return true;
}

/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put the move lists at the BB start or end if it is possible.
   In other words, this decreases code duplication of allocno
   moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}

/* Last move (in the move sequence being processed) setting up the
   corresponding hard register.  */
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];

/* If the element value is equal to CURR_TICK then the corresponding
   element in `hard_regno_last_set' is defined and correct.  */
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];

/* Last move (in the move sequence being processed) setting up the
   corresponding allocno.  */
static move_t *allocno_last_set;

/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
static int *allocno_last_set_check;

/* Definition of vector of moves.  */

/* This vec contains moves sorted topologically (depth-first) on their
   dependency graph.  */
static vec<move_t> move_vec;

/* The variable value is used to check correctness of values of
   elements of arrays `hard_regno_last_set' and
   `allocno_last_set'.  */
static int curr_tick;

/* This recursive function traverses dependencies of MOVE and produces
   a topological sort (in depth-first order).  */
static void
traverse_moves (move_t move)
{
  int i;

  if (move->visited_p)
    return;
  move->visited_p = true;
  for (i = move->deps_num - 1; i >= 0; i--)
    traverse_moves (move->deps[i]);
  move_vec.safe_push (move);
}
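
/* An illustrative example of the cycle removal done by
   modify_move_list below: suppose allocnos A1 and B1 outside a region
   got hard registers 1 and 2, and the corresponding allocnos A2 and
   B2 inside the region got hard registers 2 and 1.  The border moves
   "A2 <- A1" (reg 1 -> reg 2) and "B2 <- B1" (reg 2 -> reg 1) depend
   on each other cyclically.  The cycle is broken with a new allocno N
   marked as spilled:

       N  <- A1   (reg 1 -> memory)
       B2 <- B1   (reg 2 -> reg 1)
       A2 <- N    (memory -> reg 2)  */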

/* Remove unnecessary moves from LIST, make a topological sort, and
   remove cycles on hard reg dependencies by introducing new allocnos
   assigned to memory and additional moves.  Return the resulting move
   list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
        continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
        {
          hard_regno_last_set[hard_regno + i] = move;
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
        }
    }
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
        {
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
          for (n = i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
                    != ALLOCNO_REGNO (from)))
              n++;
          move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
          for (n = i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
                    != ALLOCNO_REGNO (from)))
              move->deps[n++] = hard_regno_last_set[hard_regno + i];
          move->deps_num = n;
        }
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
        last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles:  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
        {
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
          for (i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && ALLOCNO_HARD_REGNO
                   (hard_regno_last_set[hard_regno + i]->to) >= 0)
              {
                int n, j;
                ira_allocno_t new_allocno;

                set_move = hard_regno_last_set[hard_regno + i];
                /* It does not matter which loop_tree_node (of TO or
                   FROM) to use for the new allocno because of the
                   subsequent IRA internal representation
                   flattening.  */
                new_allocno
                  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
                                        ALLOCNO_LOOP_TREE_NODE (set_move->to));
                ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
                ira_set_allocno_class (new_allocno,
                                       ALLOCNO_CLASS (set_move->to));
                ira_create_allocno_objects (new_allocno);
                ALLOCNO_ASSIGNED_P (new_allocno) = true;
                ALLOCNO_HARD_REGNO (new_allocno) = -1;
                ALLOCNO_EMIT_DATA (new_allocno)->reg
                  = ira_create_new_reg (allocno_emit_reg (set_move->to));

                /* Make it possibly conflicting with all earlier
                   created allocnos.  Cases where temporary allocnos
                   are created to remove the cycles are quite
                   rare.  */
                n = ALLOCNO_NUM_OBJECTS (new_allocno);
                gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
                for (j = 0; j < n; j++)
                  {
                    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

                    OBJECT_MIN (new_obj) = 0;
                    OBJECT_MAX (new_obj) = ira_objects_num - 1;
                  }

                new_move = create_move (set_move->to, new_allocno);
                set_move->to = new_allocno;
                move_vec.safe_push (new_move);
                ira_move_loops_num++;
                if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                  fprintf (ira_dump_file,
                           "    Creating temporary allocno a%dr%d\n",
                           ALLOCNO_NUM (new_allocno),
                           REGNO (allocno_emit_reg (new_allocno)));
              }
        }
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
        continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
        {
          hard_regno_last_set[hard_regno + i] = move;
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
        }
    }
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}

/* Generate RTX move insns from the move list LIST.  This updates
   allocation cost using move execution frequency FREQ.  */
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
        {
          /* The reload pass needs to have insn codes set up.  If
             reload sets up insn codes by itself, it may fail because
             insns will have hard registers instead of pseudos and
             there may be no machine insn with the given hard
             registers.  */
          recog_memoized (insn);
          /* Add the insn to the equiv init insn list if it is
             necessary.  Otherwise reload will not remove this insn if
             it decides to use the equivalence.  */
          if ((set = single_set (insn)) != NULL_RTX)
            {
              dest = SET_DEST (set);
              if (GET_CODE (dest) == SUBREG)
                dest = SUBREG_REG (dest);
              ira_assert (REG_P (dest));
              regno = REGNO (dest);
              if (regno >= ira_reg_equiv_len
                  || (ira_reg_equiv[regno].invariant == NULL_RTX
                      && ira_reg_equiv[regno].constant == NULL_RTX))
                continue; /* regno has no equivalence.  */
              ira_assert ((int) reg_equivs->length () > regno);
              reg_equiv_init (regno)
                = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
            }
        }
      if (ira_use_lra_p)
        ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
        {
          if (ALLOCNO_HARD_REGNO (list->from) >= 0)
            {
              cost = ira_memory_move_cost[mode][aclass][0] * freq;
              ira_store_cost += cost;
            }
        }
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
        {
          if (ALLOCNO_HARD_REGNO (list->to) >= 0)
            {
              /* A memory -> register move: use the load cost
                 (index 1), consistent with update_costs below.  */
              cost = ira_memory_move_cost[mode][aclass][1] * freq;
              ira_load_cost += cost;
            }
        }
      else
        {
          ira_init_register_move_cost_if_necessary (mode);
          cost = ira_register_move_cost[mode][aclass][aclass] * freq;
          ira_shuffle_cost += cost;
        }
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}

/* Generate RTX move insns from move lists attached to basic blocks
   and edges.  */
static void
emit_moves (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  rtx_insn *insns, *tmp;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (at_bb_start[bb->index] != NULL)
        {
          at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
          insns = emit_move_list (at_bb_start[bb->index],
                                  REG_FREQ_FROM_BB (bb));
          tmp = BB_HEAD (bb);
          if (LABEL_P (tmp))
            tmp = NEXT_INSN (tmp);
          if (NOTE_INSN_BASIC_BLOCK_P (tmp))
            tmp = NEXT_INSN (tmp);
          if (tmp == BB_HEAD (bb))
            emit_insn_before (insns, tmp);
          else if (tmp != NULL_RTX)
            emit_insn_after (insns, PREV_INSN (tmp));
          else
            emit_insn_after (insns, get_last_insn ());
        }

      if (at_bb_end[bb->index] != NULL)
        {
          at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
          insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
          ira_assert (! control_flow_insn_p (BB_END (bb)));
          emit_insn_after (insns, BB_END (bb));
        }

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (e->aux == NULL)
            continue;
          ira_assert ((e->flags & EDGE_ABNORMAL) == 0
                      || ! EDGE_CRITICAL_P (e));
          e->aux = modify_move_list ((move_t) e->aux);
          insert_insn_on_edge
            (emit_move_list ((move_t) e->aux,
                             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
             e);
          if (e->src->next_bb != e->dest)
            ira_additional_jumps_num++;
        }
    }
}

/* Update costs of A and the corresponding allocnos on upper levels of
   the loop tree from reading (if READ_P) or writing A on an execution
   path with frequency FREQ.  */
static void
update_costs (ira_allocno_t a, bool read_p, int freq)
{
  ira_loop_tree_node_t parent;

  for (;;)
    {
      ALLOCNO_NREFS (a)++;
      ALLOCNO_FREQ (a) += freq;
      ALLOCNO_MEMORY_COST (a)
        += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
            [read_p ? 1 : 0] * freq);
      if (ALLOCNO_CAP (a) != NULL)
        a = ALLOCNO_CAP (a);
      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
               || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
        break;
    }
}
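
/* For illustration: for a border move "a2 <- a1" processed with
   frequency FREQ in add_range_and_copies_from_move_list below,
   update_costs is called as update_costs (a1, true, freq) and
   update_costs (a2, false, freq), charging the memory load cost
   (index 1) for the read of A1 and the memory store cost (index 0)
   for the write of A2, and propagating the same amounts up the loop
   tree.  */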

/* Process moves from LIST with execution frequency FREQ to add
   ranges, copies, and modify costs for allocnos involved in the
   moves.  All regnos living through the list are in LIVE_THROUGH, and
   the loop tree node used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
                                     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges are not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
          if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
            {
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
                         ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
              ira_allocate_object_conflicts (to_obj, n);
            }
        }
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
        fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
                 cp->num, ALLOCNO_NUM (cp->first),
                 REGNO (allocno_emit_reg (cp->first)),
                 ALLOCNO_NUM (cp->second),
                 REGNO (allocno_emit_reg (cp->second)));

      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
        {
          ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
          r = OBJECT_LIVE_RANGES (from_obj);
          if (r == NULL || r->finish >= 0)
            {
              ira_add_live_range_to_object (from_obj, start, ira_max_point);
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         "    Adding range [%d..%d] to allocno a%dr%d\n",
                         start, ira_max_point, ALLOCNO_NUM (from),
                         REGNO (allocno_emit_reg (from)));
            }
          else
            {
              r->finish = ira_max_point;
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         "    Adding range [%d..%d] to allocno a%dr%d\n",
                         r->start, ira_max_point, ALLOCNO_NUM (from),
                         REGNO (allocno_emit_reg (from)));
            }
        }
      ira_max_point++;
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
          ira_add_live_range_to_object (to_obj, ira_max_point, -1);
        }
      ira_max_point++;
    }
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
          r = OBJECT_LIVE_RANGES (to_obj);
          if (r->finish < 0)
            {
              r->finish = ira_max_point - 1;
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         "    Adding range [%d..%d] to allocno a%dr%d\n",
                         r->start, r->finish, ALLOCNO_NUM (move->to),
                         REGNO (allocno_emit_reg (move->to)));
            }
        }
    }
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
        a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          ira_add_live_range_to_object (obj, start, ira_max_point - 1);
        }
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
        fprintf
          (ira_dump_file,
           "    Adding range [%d..%d] to live through %s allocno a%dr%d\n",
           start, ira_max_point - 1,
           to != NULL ? "upper level" : "",
           ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}

/* Process all move lists to add ranges, conflicts, copies, and modify
   costs for allocnos involved in the moves.  */
static void
add_ranges_and_copies (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  ira_loop_tree_node_t node;
  bitmap live_through;

  live_through = ira_allocate_bitmap ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* It does not matter which loop_tree_node (of the source or
         destination block) to use for searching allocnos by their
         regnos because of the subsequent IR flattening.  */
      node = IRA_BB_NODE (bb)->parent;
      bitmap_copy (live_through, df_get_live_in (bb));
      add_range_and_copies_from_move_list
        (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      bitmap_copy (live_through, df_get_live_out (bb));
      add_range_and_copies_from_move_list
        (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          bitmap_and (live_through,
                      df_get_live_in (e->dest), df_get_live_out (bb));
          add_range_and_copies_from_move_list
            ((move_t) e->aux, node, live_through,
             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
        }
    }
  ira_free_bitmap (live_through);
}

/* The entry function: it changes code and generates shuffling
   allocnos on region borders for regional register allocation
   (LOOPS_P is TRUE in this case).  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  if (! loops_p)
    return;
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
          generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up:  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          free_move_list ((move_t) e->aux);
          e->aux = NULL;
        }
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do this before reload because
     reload assumes that the initial insn codes are defined.  The insn
     codes can be invalidated by the CFG infrastructure, for example
     in jump redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
        recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}