/* Integrated Register Allocator.  Changing code and generating moves.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* When we have more than one region, we need to change the original
   RTL code after coloring.  Consider two allocnos representing the
   same pseudo-register outside and inside a region respectively.
   They can get different hard registers.  The reload pass works on a
   pseudo-register basis and there is no way to tell reload that a
   pseudo could be in different registers, and it is even more
   difficult to say in which places of the code the pseudo should have
   particular hard registers.  So in this case IRA has to create and
   use a new pseudo-register inside the region and add code to move
   allocno values on the region's borders.  This is done by the code
   in this file.

   The code makes a top-down traversal of the regions and generates
   new pseudos and the move code on the region borders.  In some
   complicated cases IRA can create a new pseudo used temporarily to
   move allocno values when a swap of values stored in two
   hard registers is needed (e.g. two allocnos representing different
   pseudos outside a region got hard registers 1 and 2 respectively
   and the corresponding allocnos inside the region got hard registers
   2 and 1 respectively).  At this stage, the new pseudo is marked as
   spilled.

   IRA still creates the pseudo-register and the moves on the region
   borders even when both corresponding allocnos were assigned to the
   same hard register.  This is done because, if the reload pass for
   some reason spills a pseudo-register representing the original
   pseudo outside or inside the region, the effect will be smaller
   because another pseudo will still be in the hard register.  In most
   cases, this is better than spilling the original pseudo over its
   whole live range.  If reload does not change the allocation for the
   two pseudo-registers, the trivial move will be removed by
   post-reload optimizations.

   IRA does not generate a new pseudo and moves for the allocno values
   if both allocnos representing an original pseudo inside and outside
   a region are assigned to the same hard register and the register
   pressure in the region for the corresponding pressure class is less
   than the number of available hard registers for that pressure
   class.

   IRA also does some optimizations to remove redundant moves which
   are transformed into stores by the reload pass on CFG edges
   representing exits from the region.

   IRA tries to reduce duplication of code generated on CFG edges
   which enter and exit regions by moving some code to the edge
   sources or destinations when it is possible.  */
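
/* A purely illustrative (hypothetical) example of the above: suppose
   pseudo 70 lives across a loop and its allocno gets hard register 1
   outside the loop but hard register 2 inside it.  IRA then creates a
   fresh pseudo, say 90, for the loop body and attaches moves to the
   loop entry and exit edges, in simplified RTL:

       entry edge:  (set (reg:SI 90) (reg:SI 70))
       exit edge:   (set (reg:SI 70) (reg:SI 90))

   After allocation these become the hard register moves r2 <- r1 and
   r1 <- r2.  */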

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "expr.h"
#include "reload.h"
#include "cfgloop.h"

/* Data used to emit live range split insns and to flatten the IR.  */
ira_emit_data_t ira_allocno_emit_data;

/* Definitions for vectors of pointers.  */
typedef void *void_p;

/* Pointers to data allocated for allocnos being created during
   emitting.  Usually there are quite few such allocnos because they
   are created only for resolving loops in register shuffling.  */
static vec<void_p> new_allocno_emit_data_vec;

/* Allocate and initiate the emit data.  */
void
ira_initiate_emit_data (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_allocno_emit_data
    = (ira_emit_data_t) ira_allocate (ira_allocnos_num
                                      * sizeof (struct ira_emit_data));
  memset (ira_allocno_emit_data, 0,
          ira_allocnos_num * sizeof (struct ira_emit_data));
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
  new_allocno_emit_data_vec.create (50);
}

/* Free the emit data.  */
void
ira_finish_emit_data (void)
{
  void_p p;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_free (ira_allocno_emit_data);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  while (new_allocno_emit_data_vec.length () != 0)
    {
      p = new_allocno_emit_data_vec.pop ();
      ira_free (p);
    }
  new_allocno_emit_data_vec.release ();
}

/* Create and return a new allocno with given REGNO and
   LOOP_TREE_NODE.  Allocate emit data for it.  */
static ira_allocno_t
create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = ira_create_allocno (regno, false, loop_tree_node);
  ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
  memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
  new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
  return a;
}

/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies.  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which the given move depends.  Dependencies can be
     cyclic, which means we need a temporary to generate the moves.
     The sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg
     R1 and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};

/* Array of moves (indexed by BB index) which should be put at the
   start/end of the corresponding basic blocks.  */
static move_t *at_bb_start, *at_bb_end;

/* Max regno before renaming some pseudo-registers.  For example, the
   same pseudo-register can be renamed in a loop if its allocation is
   different outside the loop.  */
static int max_regno_before_changing;

/* Return a new move of allocnos TO and FROM.  */
static move_t
create_move (ira_allocno_t to, ira_allocno_t from)
{
  move_t move;

  move = (move_t) ira_allocate (sizeof (struct move));
  move->deps = NULL;
  move->deps_num = 0;
  move->to = to;
  move->from = from;
  move->next = NULL;
  move->insn = NULL;
  move->visited_p = false;
  return move;
}

/* Free memory for MOVE and its dependencies.  */
static void
free_move (move_t move)
{
  if (move->deps != NULL)
    ira_free (move->deps);
  ira_free (move);
}

/* Free memory for the list of moves given by its HEAD.  */
static void
free_move_list (move_t head)
{
  move_t next;

  for (; head != NULL; head = next)
    {
      next = head->next;
      free_move (head);
    }
}

/* Return TRUE if the move lists LIST1 and LIST2 are equal (two moves
   are equal if they involve the same allocnos).  */
static bool
eq_move_lists_p (move_t list1, move_t list2)
{
  for (; list1 != NULL && list2 != NULL;
       list1 = list1->next, list2 = list2->next)
    if (list1->from != list2->from || list1->to != list2->to)
      return false;
  return list1 == list2;
}

/* Print move list LIST into file F.  */
static void
print_move_list (FILE *f, move_t list)
{
  for (; list != NULL; list = list->next)
    fprintf (f, " a%dr%d->a%dr%d",
             ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
             ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
  fprintf (f, "\n");
}
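
/* For example, a two-element move list for original pseudo 70 might
   print as " a5r70->a9r70 a9r70->a12r70", where aN is the allocno
   number and rM the original regno (the numbers here are made up
   for illustration).  */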

extern void ira_debug_move_list (move_t list);

/* Print move list LIST into stderr.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}

/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was done.  */
static bool
change_regs (rtx *loc)
{
  int i, regno;
  bool result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      if (regno < FIRST_PSEUDO_REGISTER)
        return false;
      if (regno >= max_regno_before_changing)
        /* It is a shared register which was changed already.  */
        return false;
      if (ira_curr_regno_allocno_map[regno] == NULL)
        return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
        return false;
      *loc = reg;
      return true;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
            result = change_regs (&XVECEXP (*loc, i, j)) || result;
        }
    }
  return result;
}
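
/* A hypothetical illustration of change_regs: if the allocno for
   pseudo 70 in the current region was given the new pseudo 90 by
   set_allocno_reg, then change_regs rewrites

       (set (reg:SI 70) (plus:SI (reg:SI 70) (const_int 1)))
   into
       (set (reg:SI 90) (plus:SI (reg:SI 90) (const_int 1)))

   and returns true.  */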

/* Apply change_regs to the whole insn *INSN_PTR; return TRUE if any
   register was changed.  */
static bool
change_regs_in_insn (rtx_insn **insn_ptr)
{
  rtx x = *insn_ptr;
  bool result = change_regs (&x);
  *insn_ptr = as_a <rtx_insn *> (x);
  return result;
}

/* Attach MOVE to the edge E.  The move is attached to the head of the
   list if HEAD_P is TRUE.  */
static void
add_to_edge_list (edge e, move_t move, bool head_p)
{
  move_t last;

  if (head_p || e->aux == NULL)
    {
      move->next = (move_t) e->aux;
      e->aux = move;
    }
  else
    {
      for (last = (move_t) e->aux; last->next != NULL; last = last->next)
        ;
      last->next = move;
      move->next = NULL;
    }
}

/* Create and return a new pseudo-register with the same attributes as
   ORIGINAL_REG.  */
rtx
ira_create_new_reg (rtx original_reg)
{
  rtx new_reg;

  new_reg = gen_reg_rtx (GET_MODE (original_reg));
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Creating newreg=%i from oldreg=%i\n",
             REGNO (new_reg), REGNO (original_reg));
  ira_expand_reg_equiv ();
  return new_reg;
}

/* Return TRUE if the loop given by SUBNODE is inside the loop given
   by NODE.  */
static bool
subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
{
  for (; subnode != NULL; subnode = subnode->parent)
    if (subnode == node)
      return true;
  return false;
}

/* Set up member `reg' to REG for allocnos which have the same regno
   as ALLOCNO and which are inside the loop corresponding to
   ALLOCNO.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
        {
          node = node->parent;
          if (node == NULL)
            break;
          a = node->regno_allocno_map[regno];
        }
      if (a == NULL)
        continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
        break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}

/* Return TRUE if there is an entry to the given loop not from its
   parent (or grandparent) block.  For example, this is possible for
   two adjacent loops inside another loop.  */
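/* A sketch of such a CFG (hypothetical): loops L1 and L2 are both
   children of loop L0, and an edge goes from a block of L1 directly
   to a block of L2.  Then L2 is entered from L1, which is neither its
   parent nor a grandparent, so entered_from_non_parent_p returns
   true for L2.  */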
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
        FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
          if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
              && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
            {
              for (parent = src_loop_node->parent;
                   parent != NULL;
                   parent = parent->parent)
                if (parent == loop_node)
                  break;
              if (parent != NULL)
                /* That is an exit from a nested loop -- skip it.  */
                continue;
              for (parent = loop_node->parent;
                   parent != NULL;
                   parent = parent->parent)
                if (src_loop_node == parent)
                  break;
              if (parent == NULL)
                return true;
            }
      }
  return false;
}

/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
static void
setup_entered_from_non_parent_p (void)
{
  unsigned int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      ira_loop_nodes[i].entered_from_non_parent_p
        = entered_from_non_parent_p (&ira_loop_nodes[i]);
}

/* Return TRUE if a move of SRC_ALLOCNO (assigned to a hard register)
   to DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change the value of the destination.  One possible reason for
   this is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
              && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
        /* We reached the destination and everything is OK.  */
        return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
        return false;
      else if (node->entered_from_non_parent_p)
        /* If there is a path from a destination loop block to the
           source loop header containing basic blocks of non-parents
           (grandparents) of the source loop, we should have checked
           modifications of the pseudo on this path too to decide
           whether the store can be removed.  It could be done by
           solving a data-flow problem.  Unfortunately such a global
           solution would complicate IR flattening.  Therefore we just
           prohibit removal of the store in such a complicated case.  */
        return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}

/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
                             FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
        src_allocno = src_map[regno];
        dest_allocno = dest_map[regno];
        if (REGNO (allocno_emit_reg (src_allocno))
            == REGNO (allocno_emit_reg (dest_allocno)))
          continue;
        /* Remove unnecessary stores at the region exit.  We must do
           this for read-only memory for sure, and that is guaranteed
           by the fact that we never generate such moves on region
           borders (see the check in function change_loop).  */
        if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
            && ALLOCNO_HARD_REGNO (src_allocno) >= 0
            && store_can_be_removed_p (src_allocno, dest_allocno))
          {
            ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
            ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
            if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
              fprintf (ira_dump_file, " Remove r%d:a%d->a%d(mem)\n",
                       regno, ALLOCNO_NUM (src_allocno),
                       ALLOCNO_NUM (dest_allocno));
            continue;
          }
        move = create_move (dest_allocno, src_allocno);
        add_to_edge_list (e, move, true);
      }
}

/* Bitmap of allocnos local for the current loop.  */
static bitmap local_allocno_bitmap;

/* This bitmap is used to find that we need to generate and to use a
   new pseudo-register when processing allocnos with the same original
   regno.  */
static bitmap used_regno_bitmap;

/* This bitmap contains regnos of allocnos which were renamed locally
   because the allocnos correspond to disjoint live ranges in loops
   with a common parent.  */
static bitmap renamed_regno_bitmap;

/* Change (if necessary) pseudo-registers inside the loop given by
   loop tree node NODE.  */
static void
change_loop (ira_loop_tree_node_t node)
{
  bitmap_iterator bi;
  unsigned int i;
  int regno;
  bool used_p;
  ira_allocno_t allocno, parent_allocno, *map;
  rtx_insn *insn;
  rtx original_reg;
  enum reg_class aclass, pclass;
  ira_loop_tree_node_t parent;

  if (node != ira_loop_tree_root)
    {
      ira_assert (current_loops != NULL);

      if (node->bb != NULL)
        {
          FOR_BB_INSNS (node->bb, insn)
            if (INSN_P (insn) && change_regs_in_insn (&insn))
              {
                df_insn_rescan (insn);
                df_notes_rescan (insn);
              }
          return;
        }

      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        fprintf (ira_dump_file,
                 " Changing RTL for loop %d (header bb%d)\n",
                 node->loop_num, node->loop->header->index);

      parent = ira_curr_loop_tree_node->parent;
      map = parent->regno_allocno_map;
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
                                 0, i, bi)
        {
          allocno = ira_allocnos[i];
          regno = ALLOCNO_REGNO (allocno);
          aclass = ALLOCNO_CLASS (allocno);
          pclass = ira_pressure_class_translate[aclass];
          parent_allocno = map[regno];
          ira_assert (regno < ira_reg_equiv_len);
          /* We generate a move even when both allocnos have the same
             hard register because the reload pass can put an allocno
             into memory; in this case we will have live range
             splitting.  If that does not happen, such identical hard
             register moves will be removed.  The worst case, when
             both allocnos are put into memory by reload, is very
             rare.  */
          if (parent_allocno != NULL
              && (ALLOCNO_HARD_REGNO (allocno)
                  == ALLOCNO_HARD_REGNO (parent_allocno))
              && (ALLOCNO_HARD_REGNO (allocno) < 0
                  || (parent->reg_pressure[pclass] + 1
                      <= ira_class_hard_regs_num[pclass])
                  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
                                        [ALLOCNO_MODE (allocno)],
                                        ALLOCNO_HARD_REGNO (allocno))
                  /* Don't create copies because reload can spill an
                     allocno set by a copy even though the allocno
                     will not get a memory slot.  */
                  || ira_equiv_no_lvalue_p (regno)
                  || (pic_offset_table_rtx != NULL
                      && (ALLOCNO_REGNO (allocno)
                          == (int) REGNO (pic_offset_table_rtx)))))
            continue;
          original_reg = allocno_emit_reg (allocno);
          if (parent_allocno == NULL
              || (REGNO (allocno_emit_reg (parent_allocno))
                  == REGNO (original_reg)))
            {
              if (internal_flag_ira_verbose > 3 && ira_dump_file)
                fprintf (ira_dump_file, " %i vs parent %i:",
                         ALLOCNO_HARD_REGNO (allocno),
                         ALLOCNO_HARD_REGNO (parent_allocno));
              set_allocno_reg (allocno, ira_create_new_reg (original_reg));
            }
        }
    }
  /* Rename locals: local allocnos with the same regno in different
     loops might get different hard registers.  So we need to change
     ALLOCNO_REG.  */
  bitmap_and_compl (local_allocno_bitmap,
                    ira_curr_loop_tree_node->all_allocnos,
                    ira_curr_loop_tree_node->border_allocnos);
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
    {
      allocno = ira_allocnos[i];
      regno = ALLOCNO_REGNO (allocno);
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
        continue;
      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
      if (! used_p)
        continue;
      bitmap_set_bit (renamed_regno_bitmap, regno);
      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
    }
}

/* Set up the flag somewhere_renamed_p.  */
static void
set_allocno_somewhere_renamed_p (void)
{
  unsigned int regno;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (allocno, ai)
    {
      regno = ALLOCNO_REGNO (allocno);
      if (bitmap_bit_p (renamed_regno_bitmap, regno)
          && REGNO (allocno_emit_reg (allocno)) == regno)
        ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
    }
}

/* Return TRUE if the move lists on all edges given in vector VEC are
   equal.  */
static bool
eq_edge_move_lists_p (vec<edge, va_gc> *vec)
{
  move_t list;
  int i;

  list = (move_t) EDGE_I (vec, 0)->aux;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
      return false;
  return true;
}

/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put move lists at the BB start or end if it is possible.  In
   other words, this decreases code duplication of allocno moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}

/* Last move (in the move sequence being processed) setting up the
   corresponding hard register.  */
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];

/* If the element value is equal to CURR_TICK then the corresponding
   element in `hard_regno_last_set' is defined and correct.  */
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];

/* Last move (in the move sequence being processed) setting up the
   corresponding allocno.  */
static move_t *allocno_last_set;

/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
static int *allocno_last_set_check;

/* Definition of vector of moves.  */

/* This vec contains moves sorted topologically (depth-first) on their
   dependency graph.  */
static vec<move_t> move_vec;

/* The variable value is used to check correctness of values of
   elements of the arrays `hard_regno_last_set_check' and
   `allocno_last_set_check'.  */
static int curr_tick;

/* This recursive function traverses dependencies of MOVE and produces
   topological sorting (in depth-first order).  */
static void
traverse_moves (move_t move)
{
  int i;

  if (move->visited_p)
    return;
  move->visited_p = true;
  for (i = move->deps_num - 1; i >= 0; i--)
    traverse_moves (move->deps[i]);
  move_vec.safe_push (move);
}

/* Remove unnecessary moves in the LIST, make a topological sorting,
   and remove cycles in hard reg dependencies by introducing new
   allocnos assigned to memory and additional moves.  Return the
   resulting move list.  */
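/* To illustrate the cycle removal with the example from the comment
   on struct move: for the sequence A1->A2, B1->B2 with A1, B2 in reg
   R1 and A2, B1 in reg R2, a new memory-assigned allocno NEW is
   created for A2's regno, and the list becomes (up to ordering)

       A1 -> NEW,  B1 -> B2,  NEW -> A2

   which has no cycle on hard register dependencies.  */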
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
        continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
        {
          hard_regno_last_set[hard_regno + i] = move;
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
        }
    }
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
        {
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
          for (n = i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
                    != ALLOCNO_REGNO (from)))
              n++;
          move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
          for (n = i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
                    != ALLOCNO_REGNO (from)))
              move->deps[n++] = hard_regno_last_set[hard_regno + i];
          move->deps_num = n;
        }
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
        last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles:  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
        {
          nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
          for (i = 0; i < nregs; i++)
            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
                && ALLOCNO_HARD_REGNO
                   (hard_regno_last_set[hard_regno + i]->to) >= 0)
              {
                int n, j;
                ira_allocno_t new_allocno;

                set_move = hard_regno_last_set[hard_regno + i];
                /* It does not matter what loop_tree_node (of TO or
                   FROM) to use for the new allocno because of
                   subsequent IRA internal representation
                   flattening.  */
                new_allocno
                  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
                                        ALLOCNO_LOOP_TREE_NODE (set_move->to));
                ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
                ira_set_allocno_class (new_allocno,
                                       ALLOCNO_CLASS (set_move->to));
                ira_create_allocno_objects (new_allocno);
                ALLOCNO_ASSIGNED_P (new_allocno) = true;
                ALLOCNO_HARD_REGNO (new_allocno) = -1;
                ALLOCNO_EMIT_DATA (new_allocno)->reg
                  = ira_create_new_reg (allocno_emit_reg (set_move->to));

                /* Make it possibly conflicting with all earlier
                   created allocnos.  Cases where temporary allocnos
                   are created to remove cycles are quite rare.  */
                n = ALLOCNO_NUM_OBJECTS (new_allocno);
                gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
                for (j = 0; j < n; j++)
                  {
                    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

                    OBJECT_MIN (new_obj) = 0;
                    OBJECT_MAX (new_obj) = ira_objects_num - 1;
                  }

                new_move = create_move (set_move->to, new_allocno);
                set_move->to = new_allocno;
                move_vec.safe_push (new_move);
                ira_move_loops_num++;
                if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                  fprintf (ira_dump_file,
                           " Creating temporary allocno a%dr%d\n",
                           ALLOCNO_NUM (new_allocno),
                           REGNO (allocno_emit_reg (new_allocno)));
              }
        }
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
        continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
        {
          hard_regno_last_set[hard_regno + i] = move;
          hard_regno_last_set_check[hard_regno + i] = curr_tick;
        }
    }
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}

/* Generate RTX move insns from the move list LIST.  This updates the
   allocation cost using move execution frequency FREQ.  */
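/* As an illustration of the cost accounting below: a move whose
   destination allocno is in memory while the source has a hard
   register counts as a store and adds
   ira_memory_move_cost[mode][aclass][0] * FREQ to ira_store_cost; a
   hard-register-to-hard-register move adds
   ira_register_move_cost[mode][aclass][aclass] * FREQ to
   ira_shuffle_cost; either way the cost also goes into
   ira_overall_cost.  */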
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
        {
          /* Reload needs the insn codes to be set up already.  If
             reload sets up insn codes by itself, it may fail because
             the insns will have hard registers instead of pseudos
             and there may be no machine insn with the given hard
             registers.  */
          recog_memoized (insn);
          /* Add the insn to the equiv init insn list if it is
             necessary.  Otherwise reload will not remove this insn
             if it decides to use the equivalence.  */
          if ((set = single_set (insn)) != NULL_RTX)
            {
              dest = SET_DEST (set);
              if (GET_CODE (dest) == SUBREG)
                dest = SUBREG_REG (dest);
              ira_assert (REG_P (dest));
              regno = REGNO (dest);
              if (regno >= ira_reg_equiv_len
                  || (ira_reg_equiv[regno].invariant == NULL_RTX
                      && ira_reg_equiv[regno].constant == NULL_RTX))
                continue; /* regno has no equivalence.  */
              ira_assert ((int) reg_equivs->length () > regno);
              reg_equiv_init (regno)
                = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
            }
        }
      if (ira_use_lra_p)
        ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
        {
          if (ALLOCNO_HARD_REGNO (list->from) >= 0)
            {
              cost = ira_memory_move_cost[mode][aclass][0] * freq;
              ira_store_cost += cost;
            }
        }
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
        {
          if (ALLOCNO_HARD_REGNO (list->to) >= 0)
            {
              cost = ira_memory_move_cost[mode][aclass][0] * freq;
              ira_load_cost += cost;
            }
        }
      else
        {
          ira_init_register_move_cost_if_necessary (mode);
          cost = ira_register_move_cost[mode][aclass][aclass] * freq;
          ira_shuffle_cost += cost;
        }
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}

/* Generate RTX move insns from move lists attached to basic blocks
   and edges.  */
static void
emit_moves (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  rtx_insn *insns, *tmp;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (at_bb_start[bb->index] != NULL)
        {
          at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
          insns = emit_move_list (at_bb_start[bb->index],
                                  REG_FREQ_FROM_BB (bb));
          tmp = BB_HEAD (bb);
          if (LABEL_P (tmp))
            tmp = NEXT_INSN (tmp);
          if (NOTE_INSN_BASIC_BLOCK_P (tmp))
            tmp = NEXT_INSN (tmp);
          if (tmp == BB_HEAD (bb))
            emit_insn_before (insns, tmp);
          else if (tmp != NULL_RTX)
            emit_insn_after (insns, PREV_INSN (tmp));
          else
            emit_insn_after (insns, get_last_insn ());
        }

      if (at_bb_end[bb->index] != NULL)
        {
          at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
          insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
          ira_assert (! control_flow_insn_p (BB_END (bb)));
          emit_insn_after (insns, BB_END (bb));
        }

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (e->aux == NULL)
            continue;
          ira_assert ((e->flags & EDGE_ABNORMAL) == 0
                      || ! EDGE_CRITICAL_P (e));
          e->aux = modify_move_list ((move_t) e->aux);
          insert_insn_on_edge
            (emit_move_list ((move_t) e->aux,
                             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
             e);
          if (e->src->next_bb != e->dest)
            ira_additional_jumps_num++;
        }
    }
}

/* Update the costs of A and the corresponding allocnos on upper
   levels of the loop tree for reading (if READ_P) or writing A on an
   execution path with frequency FREQ.  */
static void
update_costs (ira_allocno_t a, bool read_p, int freq)
{
  ira_loop_tree_node_t parent;

  for (;;)
    {
      ALLOCNO_NREFS (a)++;
      ALLOCNO_FREQ (a) += freq;
      ALLOCNO_MEMORY_COST (a)
        += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
            [read_p ? 1 : 0] * freq);
      if (ALLOCNO_CAP (a) != NULL)
        a = ALLOCNO_CAP (a);
      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
               || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
        break;
    }
}

/* Process moves from LIST with execution frequency FREQ to add
   ranges, copies, and modify costs for allocnos involved in the
   moves.  All regnos living through the list are in LIVE_THROUGH, and
   the loop tree node used to find the corresponding allocnos is
   NODE.  */
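/* Roughly, the point bookkeeping done below works like this: with
   START being the incremented ira_max_point on entry, each source
   allocno gets (or extends) a live range beginning at START, each
   destination allocno opens a new range one point later which is
   closed once all moves are processed, and every live-through allocno
   finally covers the whole span [START..ira_max_point - 1].  */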
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
                                     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges are not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
          if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
            {
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file, " Allocate conflicts for a%dr%d\n",
                         ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
              ira_allocate_object_conflicts (to_obj, n);
            }
        }
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
        fprintf (ira_dump_file, " Adding cp%d:a%dr%d-a%dr%d\n",
                 cp->num, ALLOCNO_NUM (cp->first),
                 REGNO (allocno_emit_reg (cp->first)),
                 ALLOCNO_NUM (cp->second),
                 REGNO (allocno_emit_reg (cp->second)));

      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
        {
          ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
          r = OBJECT_LIVE_RANGES (from_obj);
          if (r == NULL || r->finish >= 0)
            {
              ira_add_live_range_to_object (from_obj, start, ira_max_point);
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         " Adding range [%d..%d] to allocno a%dr%d\n",
                         start, ira_max_point, ALLOCNO_NUM (from),
                         REGNO (allocno_emit_reg (from)));
            }
          else
            {
              r->finish = ira_max_point;
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         " Adding range [%d..%d] to allocno a%dr%d\n",
                         r->start, ira_max_point, ALLOCNO_NUM (from),
                         REGNO (allocno_emit_reg (from)));
            }
        }
      ira_max_point++;
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
          ira_add_live_range_to_object (to_obj, ira_max_point, -1);
        }
      ira_max_point++;
    }
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
        {
          ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
          r = OBJECT_LIVE_RANGES (to_obj);
          if (r->finish < 0)
            {
              r->finish = ira_max_point - 1;
              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
                fprintf (ira_dump_file,
                         " Adding range [%d..%d] to allocno a%dr%d\n",
                         r->start, r->finish, ALLOCNO_NUM (move->to),
                         REGNO (allocno_emit_reg (move->to)));
            }
        }
    }
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
        a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          ira_add_live_range_to_object (obj, start, ira_max_point - 1);
        }
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
        fprintf
          (ira_dump_file,
           " Adding range [%d..%d] to live through %s allocno a%dr%d\n",
           start, ira_max_point - 1,
           to != NULL ? "upper level" : "",
           ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}

/* Process all move lists to add ranges, conflicts, copies, and modify
   costs for allocnos involved in the moves.  */
static void
add_ranges_and_copies (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  ira_loop_tree_node_t node;
  bitmap live_through;

  live_through = ira_allocate_bitmap ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* It does not matter what loop_tree_node (of the source or
         destination block) to use for searching allocnos by their
         regnos because of the subsequent IR flattening.  */
      node = IRA_BB_NODE (bb)->parent;
      bitmap_copy (live_through, df_get_live_in (bb));
      add_range_and_copies_from_move_list
        (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      bitmap_copy (live_through, df_get_live_out (bb));
      add_range_and_copies_from_move_list
        (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          bitmap_and (live_through,
                      df_get_live_in (e->dest), df_get_live_out (bb));
          add_range_and_copies_from_move_list
            ((move_t) e->aux, node, live_through,
             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
        }
    }
  ira_free_bitmap (live_through);
}

/* The entry function.  It changes code and generates shuffling
   allocnos on region borders for regional register allocation
   (LOOPS_P is TRUE in this case).  */
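/* An overview of the steps performed below: remember the original
   register of every allocno, rename pseudos per region (change_loop
   via ira_traverse_loop_tree), compute entered_from_non_parent_p for
   the loop nodes, generate moves on the edges, unify the edge move
   lists into block starts/ends where possible, emit the move insns
   and account their cost, add the new live ranges and copies, and
   finally commit the edge insertions and re-recognize the insns.  */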
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  if (! loops_p)
    return;
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
          generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up:  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          free_move_list ((move_t) e->aux);
          e->aux = NULL;
        }
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do this before reload because
     reload assumes the initial insn codes are defined.  The insn
     codes can be invalidated by the CFG infrastructure, for example
     in jump redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
        recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}