Fix to expose more LIM when creating mem_ref
[official-gcc.git] / gcc / ira-build.c
blobbf9124eca59a5b346ae648f1443a484c485c6b9a
1 /* Building internal representation for IRA.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "hard-reg-set.h"
32 #include "basic-block.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "toplev.h"
37 #include "params.h"
38 #include "df.h"
39 #include "output.h"
40 #include "reload.h"
41 #include "sparseset.h"
42 #include "ira-int.h"
43 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
45 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
46 ira_loop_tree_node_t);
48 /* The root of the loop tree corresponding to the all function. */
49 ira_loop_tree_node_t ira_loop_tree_root;
51 /* Height of the loop tree. */
52 int ira_loop_tree_height;
54 /* All nodes representing basic blocks are referred through the
55 following array. We can not use basic block member `aux' for this
56 because it is used for insertion of insns on edges. */
57 ira_loop_tree_node_t ira_bb_nodes;
59 /* All nodes representing loops are referred through the following
60 array. */
61 ira_loop_tree_node_t ira_loop_nodes;
63 /* Map regno -> allocnos with given regno (see comments for
64 allocno member `next_regno_allocno'). */
65 ira_allocno_t *ira_regno_allocno_map;
67 /* Array of references to all allocnos. The order number of the
68 allocno corresponds to the index in the array. Removed allocnos
69 have NULL element value. */
70 ira_allocno_t *ira_allocnos;
72 /* Sizes of the previous array. */
73 int ira_allocnos_num;
75 /* Count of conflict record structures we've created, used when creating
76 a new conflict id. */
77 int ira_objects_num;
79 /* Map a conflict id to its conflict record. */
80 ira_object_t *ira_object_id_map;
82 /* Array of references to all copies. The order number of the copy
83 corresponds to the index in the array. Removed copies have NULL
84 element value. */
85 ira_copy_t *ira_copies;
87 /* Size of the previous array. */
88 int ira_copies_num;
92 /* LAST_BASIC_BLOCK before generating additional insns because of live
93 range splitting. Emitting insns on a critical edge creates a new
94 basic block. */
95 static int last_basic_block_before_change;
97 /* The following function allocates the loop tree nodes. If LOOPS_P
98 is FALSE, the nodes corresponding to the loops (except the root
99 which corresponds the all function) will be not allocated but nodes
100 will still be allocated for basic blocks. */
101 static void
102 create_loop_tree_nodes (bool loops_p)
104 unsigned int i, j;
105 int max_regno;
106 bool skip_p;
107 edge_iterator ei;
108 edge e;
109 VEC (edge, heap) *edges;
110 loop_p loop;
112 ira_bb_nodes
113 = ((struct ira_loop_tree_node *)
114 ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
115 last_basic_block_before_change = last_basic_block;
116 for (i = 0; i < (unsigned int) last_basic_block; i++)
118 ira_bb_nodes[i].regno_allocno_map = NULL;
119 memset (ira_bb_nodes[i].reg_pressure, 0,
120 sizeof (ira_bb_nodes[i].reg_pressure));
121 ira_bb_nodes[i].all_allocnos = NULL;
122 ira_bb_nodes[i].modified_regnos = NULL;
123 ira_bb_nodes[i].border_allocnos = NULL;
124 ira_bb_nodes[i].local_copies = NULL;
126 ira_loop_nodes = ((struct ira_loop_tree_node *)
127 ira_allocate (sizeof (struct ira_loop_tree_node)
128 * VEC_length (loop_p, ira_loops.larray)));
129 max_regno = max_reg_num ();
130 for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
132 if (loop != ira_loops.tree_root)
134 ira_loop_nodes[i].regno_allocno_map = NULL;
135 if (! loops_p)
136 continue;
137 skip_p = false;
138 FOR_EACH_EDGE (e, ei, loop->header->preds)
139 if (e->src != loop->latch
140 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
142 skip_p = true;
143 break;
145 if (skip_p)
146 continue;
147 edges = get_loop_exit_edges (loop);
148 for (j = 0; VEC_iterate (edge, edges, j, e); j++)
149 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
151 skip_p = true;
152 break;
154 VEC_free (edge, heap, edges);
155 if (skip_p)
156 continue;
158 ira_loop_nodes[i].regno_allocno_map
159 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
160 memset (ira_loop_nodes[i].regno_allocno_map, 0,
161 sizeof (ira_allocno_t) * max_regno);
162 memset (ira_loop_nodes[i].reg_pressure, 0,
163 sizeof (ira_loop_nodes[i].reg_pressure));
164 ira_loop_nodes[i].all_allocnos = ira_allocate_bitmap ();
165 ira_loop_nodes[i].modified_regnos = ira_allocate_bitmap ();
166 ira_loop_nodes[i].border_allocnos = ira_allocate_bitmap ();
167 ira_loop_nodes[i].local_copies = ira_allocate_bitmap ();
171 /* The function returns TRUE if there are more one allocation
172 region. */
173 static bool
174 more_one_region_p (void)
176 unsigned int i;
177 loop_p loop;
179 for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
180 if (ira_loop_nodes[i].regno_allocno_map != NULL
181 && ira_loop_tree_root != &ira_loop_nodes[i])
182 return true;
183 return false;
186 /* Free the loop tree node of a loop. */
187 static void
188 finish_loop_tree_node (ira_loop_tree_node_t loop)
190 if (loop->regno_allocno_map != NULL)
192 ira_assert (loop->bb == NULL);
193 ira_free_bitmap (loop->local_copies);
194 ira_free_bitmap (loop->border_allocnos);
195 ira_free_bitmap (loop->modified_regnos);
196 ira_free_bitmap (loop->all_allocnos);
197 ira_free (loop->regno_allocno_map);
198 loop->regno_allocno_map = NULL;
202 /* Free the loop tree nodes. */
203 static void
204 finish_loop_tree_nodes (void)
206 unsigned int i;
207 loop_p loop;
209 for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
210 finish_loop_tree_node (&ira_loop_nodes[i]);
211 ira_free (ira_loop_nodes);
212 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
214 if (ira_bb_nodes[i].local_copies != NULL)
215 ira_free_bitmap (ira_bb_nodes[i].local_copies);
216 if (ira_bb_nodes[i].border_allocnos != NULL)
217 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
218 if (ira_bb_nodes[i].modified_regnos != NULL)
219 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
220 if (ira_bb_nodes[i].all_allocnos != NULL)
221 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
222 if (ira_bb_nodes[i].regno_allocno_map != NULL)
223 ira_free (ira_bb_nodes[i].regno_allocno_map);
225 ira_free (ira_bb_nodes);
230 /* The following recursive function adds LOOP to the loop tree
231 hierarchy. LOOP is added only once. */
232 static void
233 add_loop_to_tree (struct loop *loop)
235 struct loop *parent;
236 ira_loop_tree_node_t loop_node, parent_node;
238 /* We can not use loop node access macros here because of potential
239 checking and because the nodes are not initialized enough
240 yet. */
241 if (loop_outer (loop) != NULL)
242 add_loop_to_tree (loop_outer (loop));
243 if (ira_loop_nodes[loop->num].regno_allocno_map != NULL
244 && ira_loop_nodes[loop->num].children == NULL)
246 /* We have not added loop node to the tree yet. */
247 loop_node = &ira_loop_nodes[loop->num];
248 loop_node->loop = loop;
249 loop_node->bb = NULL;
250 for (parent = loop_outer (loop);
251 parent != NULL;
252 parent = loop_outer (parent))
253 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
254 break;
255 if (parent == NULL)
257 loop_node->next = NULL;
258 loop_node->subloop_next = NULL;
259 loop_node->parent = NULL;
261 else
263 parent_node = &ira_loop_nodes[parent->num];
264 loop_node->next = parent_node->children;
265 parent_node->children = loop_node;
266 loop_node->subloop_next = parent_node->subloops;
267 parent_node->subloops = loop_node;
268 loop_node->parent = parent_node;
273 /* The following recursive function sets up levels of nodes of the
274 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
275 The function returns maximal value of level in the tree + 1. */
276 static int
277 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
279 int height, max_height;
280 ira_loop_tree_node_t subloop_node;
282 ira_assert (loop_node->bb == NULL);
283 loop_node->level = level;
284 max_height = level + 1;
285 for (subloop_node = loop_node->subloops;
286 subloop_node != NULL;
287 subloop_node = subloop_node->subloop_next)
289 ira_assert (subloop_node->bb == NULL);
290 height = setup_loop_tree_level (subloop_node, level + 1);
291 if (height > max_height)
292 max_height = height;
294 return max_height;
297 /* Create the loop tree. The algorithm is designed to provide correct
298 order of loops (they are ordered by their last loop BB) and basic
299 blocks in the chain formed by member next. */
300 static void
301 form_loop_tree (void)
303 unsigned int i;
304 basic_block bb;
305 struct loop *parent;
306 ira_loop_tree_node_t bb_node, loop_node;
307 loop_p loop;
309 /* We can not use loop/bb node access macros because of potential
310 checking and because the nodes are not initialized enough
311 yet. */
312 for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
313 if (ira_loop_nodes[i].regno_allocno_map != NULL)
315 ira_loop_nodes[i].children = NULL;
316 ira_loop_nodes[i].subloops = NULL;
318 FOR_EACH_BB (bb)
320 bb_node = &ira_bb_nodes[bb->index];
321 bb_node->bb = bb;
322 bb_node->loop = NULL;
323 bb_node->subloops = NULL;
324 bb_node->children = NULL;
325 bb_node->subloop_next = NULL;
326 bb_node->next = NULL;
327 for (parent = bb->loop_father;
328 parent != NULL;
329 parent = loop_outer (parent))
330 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
331 break;
332 add_loop_to_tree (parent);
333 loop_node = &ira_loop_nodes[parent->num];
334 bb_node->next = loop_node->children;
335 bb_node->parent = loop_node;
336 loop_node->children = bb_node;
338 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (ira_loops.tree_root->num);
339 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
340 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
345 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
346 tree nodes. */
347 static void
348 rebuild_regno_allocno_maps (void)
350 unsigned int l;
351 int max_regno, regno;
352 ira_allocno_t a;
353 ira_loop_tree_node_t loop_tree_node;
354 loop_p loop;
355 ira_allocno_iterator ai;
357 max_regno = max_reg_num ();
358 for (l = 0; VEC_iterate (loop_p, ira_loops.larray, l, loop); l++)
359 if (ira_loop_nodes[l].regno_allocno_map != NULL)
361 ira_free (ira_loop_nodes[l].regno_allocno_map);
362 ira_loop_nodes[l].regno_allocno_map
363 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
364 * max_regno);
365 memset (ira_loop_nodes[l].regno_allocno_map, 0,
366 sizeof (ira_allocno_t) * max_regno);
368 ira_free (ira_regno_allocno_map);
369 ira_regno_allocno_map
370 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
371 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
372 FOR_EACH_ALLOCNO (a, ai)
374 if (ALLOCNO_CAP_MEMBER (a) != NULL)
375 /* Caps are not in the regno allocno maps. */
376 continue;
377 regno = ALLOCNO_REGNO (a);
378 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
379 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
380 ira_regno_allocno_map[regno] = a;
381 if (loop_tree_node->regno_allocno_map[regno] == NULL)
382 /* Remember that we can create temporary allocnos to break
383 cycles in register shuffle. */
384 loop_tree_node->regno_allocno_map[regno] = a;
389 /* Pools for allocnos, allocno live ranges and objects. */
390 static alloc_pool allocno_pool, live_range_pool, object_pool;
392 /* Vec containing references to all created allocnos. It is a
393 container of array allocnos. */
394 static VEC(ira_allocno_t,heap) *allocno_vec;
396 /* Vec containing references to all created ira_objects. It is a
397 container of ira_object_id_map. */
398 static VEC(ira_object_t,heap) *ira_object_id_map_vec;
400 /* Initialize data concerning allocnos. */
401 static void
402 initiate_allocnos (void)
404 live_range_pool
405 = create_alloc_pool ("live ranges",
406 sizeof (struct live_range), 100);
407 allocno_pool
408 = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
409 object_pool
410 = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
411 allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
412 ira_allocnos = NULL;
413 ira_allocnos_num = 0;
414 ira_objects_num = 0;
415 ira_object_id_map_vec
416 = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
417 ira_object_id_map = NULL;
418 ira_regno_allocno_map
419 = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
420 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
423 /* Create and return an object corresponding to a new allocno A. */
424 static ira_object_t
425 ira_create_object (ira_allocno_t a, int subword)
427 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
428 ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
430 OBJECT_ALLOCNO (obj) = a;
431 OBJECT_SUBWORD (obj) = subword;
432 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
433 OBJECT_CONFLICT_VEC_P (obj) = false;
434 OBJECT_CONFLICT_ARRAY (obj) = NULL;
435 OBJECT_NUM_CONFLICTS (obj) = 0;
436 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
437 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
438 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
439 reg_class_contents[cover_class]);
440 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
441 reg_class_contents[cover_class]);
442 OBJECT_MIN (obj) = INT_MAX;
443 OBJECT_MAX (obj) = -1;
444 OBJECT_LIVE_RANGES (obj) = NULL;
446 VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
447 ira_object_id_map
448 = VEC_address (ira_object_t, ira_object_id_map_vec);
449 ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);
451 return obj;
454 /* Create and return the allocno corresponding to REGNO in
455 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
456 same regno if CAP_P is FALSE. */
457 ira_allocno_t
458 ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
460 ira_allocno_t a;
462 a = (ira_allocno_t) pool_alloc (allocno_pool);
463 ALLOCNO_REGNO (a) = regno;
464 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
465 if (! cap_p)
467 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
468 ira_regno_allocno_map[regno] = a;
469 if (loop_tree_node->regno_allocno_map[regno] == NULL)
470 /* Remember that we can create temporary allocnos to break
471 cycles in register shuffle on region borders (see
472 ira-emit.c). */
473 loop_tree_node->regno_allocno_map[regno] = a;
475 ALLOCNO_CAP (a) = NULL;
476 ALLOCNO_CAP_MEMBER (a) = NULL;
477 ALLOCNO_NUM (a) = ira_allocnos_num;
478 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
479 ALLOCNO_NREFS (a) = 0;
480 ALLOCNO_FREQ (a) = 0;
481 ALLOCNO_HARD_REGNO (a) = -1;
482 ALLOCNO_CALL_FREQ (a) = 0;
483 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
484 #ifdef STACK_REGS
485 ALLOCNO_NO_STACK_REG_P (a) = false;
486 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
487 #endif
488 ALLOCNO_MEM_OPTIMIZED_DEST (a) = NULL;
489 ALLOCNO_MEM_OPTIMIZED_DEST_P (a) = false;
490 ALLOCNO_SOMEWHERE_RENAMED_P (a) = false;
491 ALLOCNO_CHILD_RENAMED_P (a) = false;
492 ALLOCNO_DONT_REASSIGN_P (a) = false;
493 ALLOCNO_BAD_SPILL_P (a) = false;
494 ALLOCNO_IN_GRAPH_P (a) = false;
495 ALLOCNO_ASSIGNED_P (a) = false;
496 ALLOCNO_MAY_BE_SPILLED_P (a) = false;
497 ALLOCNO_SPLAY_REMOVED_P (a) = false;
498 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
499 ALLOCNO_COPIES (a) = NULL;
500 ALLOCNO_HARD_REG_COSTS (a) = NULL;
501 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
502 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
503 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
504 ALLOCNO_LEFT_CONFLICTS_SIZE (a) = -1;
505 ALLOCNO_COVER_CLASS (a) = NO_REGS;
506 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = 0;
507 ALLOCNO_COVER_CLASS_COST (a) = 0;
508 ALLOCNO_MEMORY_COST (a) = 0;
509 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
510 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
511 ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = NULL;
512 ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
513 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
514 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
515 ALLOCNO_NUM_OBJECTS (a) = 0;
517 VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
518 ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
519 ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
521 return a;
524 /* Set up cover class for A and update its conflict hard registers. */
525 void
526 ira_set_allocno_cover_class (ira_allocno_t a, enum reg_class cover_class)
528 ALLOCNO_COVER_CLASS (a) = cover_class;
531 /* Determine the number of objects we should associate with allocno A
532 and allocate them. */
533 void
534 ira_create_allocno_objects (ira_allocno_t a)
536 enum machine_mode mode = ALLOCNO_MODE (a);
537 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
538 int n = ira_reg_class_nregs[cover_class][mode];
539 int i;
541 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
542 n = 1;
544 ALLOCNO_NUM_OBJECTS (a) = n;
545 for (i = 0; i < n; i++)
546 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
549 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
550 ALLOCNO_OBJECT structures. This must be called after the cover
551 classes are known. */
552 static void
553 create_allocno_objects (void)
555 ira_allocno_t a;
556 ira_allocno_iterator ai;
558 FOR_EACH_ALLOCNO (a, ai)
559 ira_create_allocno_objects (a);
562 /* Merge hard register conflict information for all objects associated with
563 allocno TO into the corresponding objects associated with FROM.
564 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
565 static void
566 merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
567 bool total_only)
569 int i;
570 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
571 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
573 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
574 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
575 if (!total_only)
576 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
577 OBJECT_CONFLICT_HARD_REGS (from_obj));
578 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
579 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
581 #ifdef STACK_REGS
582 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
583 ALLOCNO_NO_STACK_REG_P (to) = true;
584 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
585 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
586 #endif
589 /* Update hard register conflict information for all objects associated with
590 A to include the regs in SET. */
591 void
592 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
594 ira_allocno_object_iterator i;
595 ira_object_t obj;
596 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
598 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
599 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
603 /* Return TRUE if a conflict vector with NUM elements is more
604 profitable than a conflict bit vector for OBJ. */
605 bool
606 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
608 int nw;
609 int max = OBJECT_MAX (obj);
610 int min = OBJECT_MIN (obj);
612 if (max < min)
613 /* We prefer a bit vector in such case because it does not result
614 in allocation. */
615 return false;
617 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
618 return (2 * sizeof (ira_object_t) * (num + 1)
619 < 3 * nw * sizeof (IRA_INT_TYPE));
622 /* Allocates and initialize the conflict vector of OBJ for NUM
623 conflicting objects. */
624 void
625 ira_allocate_conflict_vec (ira_object_t obj, int num)
627 int size;
628 ira_object_t *vec;
630 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
631 num++; /* for NULL end marker */
632 size = sizeof (ira_object_t) * num;
633 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
634 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
635 vec[0] = NULL;
636 OBJECT_NUM_CONFLICTS (obj) = 0;
637 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
638 OBJECT_CONFLICT_VEC_P (obj) = true;
641 /* Allocate and initialize the conflict bit vector of OBJ. */
642 static void
643 allocate_conflict_bit_vec (ira_object_t obj)
645 unsigned int size;
647 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
648 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
649 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
650 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
651 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
652 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
653 OBJECT_CONFLICT_VEC_P (obj) = false;
656 /* Allocate and initialize the conflict vector or conflict bit vector
657 of OBJ for NUM conflicting allocnos whatever is more profitable. */
658 void
659 ira_allocate_object_conflicts (ira_object_t obj, int num)
661 if (ira_conflict_vector_profitable_p (obj, num))
662 ira_allocate_conflict_vec (obj, num);
663 else
664 allocate_conflict_bit_vec (obj);
667 /* Add OBJ2 to the conflicts of OBJ1. */
668 static void
669 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
671 int num;
672 unsigned int size;
674 if (OBJECT_CONFLICT_VEC_P (obj1))
676 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
677 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
678 num = curr_num + 2;
679 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
681 ira_object_t *newvec;
682 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
683 newvec = (ira_object_t *) ira_allocate (size);
684 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
685 ira_free (vec);
686 vec = newvec;
687 OBJECT_CONFLICT_ARRAY (obj1) = vec;
688 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
690 vec[num - 2] = obj2;
691 vec[num - 1] = NULL;
692 OBJECT_NUM_CONFLICTS (obj1)++;
694 else
696 int nw, added_head_nw, id;
697 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
699 id = OBJECT_CONFLICT_ID (obj2);
700 if (OBJECT_MIN (obj1) > id)
702 /* Expand head of the bit vector. */
703 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
704 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
705 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
706 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
708 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
709 vec, nw * sizeof (IRA_INT_TYPE));
710 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
712 else
714 size
715 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
716 vec = (IRA_INT_TYPE *) ira_allocate (size);
717 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
718 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
719 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
720 memset ((char *) vec
721 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
722 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
723 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
724 OBJECT_CONFLICT_ARRAY (obj1) = vec;
725 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
727 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
729 else if (OBJECT_MAX (obj1) < id)
731 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
732 size = nw * sizeof (IRA_INT_TYPE);
733 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
735 /* Expand tail of the bit vector. */
736 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
737 vec = (IRA_INT_TYPE *) ira_allocate (size);
738 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
739 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
740 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
741 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
742 OBJECT_CONFLICT_ARRAY (obj1) = vec;
743 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
745 OBJECT_MAX (obj1) = id;
747 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
751 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
752 static void
753 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
755 add_to_conflicts (obj1, obj2);
756 add_to_conflicts (obj2, obj1);
759 /* Clear all conflicts of OBJ. */
760 static void
761 clear_conflicts (ira_object_t obj)
763 if (OBJECT_CONFLICT_VEC_P (obj))
765 OBJECT_NUM_CONFLICTS (obj) = 0;
766 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
768 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
770 int nw;
772 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
773 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
777 /* The array used to find duplications in conflict vectors of
778 allocnos. */
779 static int *conflict_check;
781 /* The value used to mark allocation presence in conflict vector of
782 the current allocno. */
783 static int curr_conflict_check_tick;
785 /* Remove duplications in conflict vector of OBJ. */
786 static void
787 compress_conflict_vec (ira_object_t obj)
789 ira_object_t *vec, conflict_obj;
790 int i, j;
792 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
793 vec = OBJECT_CONFLICT_VEC (obj);
794 curr_conflict_check_tick++;
795 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
797 int id = OBJECT_CONFLICT_ID (conflict_obj);
798 if (conflict_check[id] != curr_conflict_check_tick)
800 conflict_check[id] = curr_conflict_check_tick;
801 vec[j++] = conflict_obj;
804 OBJECT_NUM_CONFLICTS (obj) = j;
805 vec[j] = NULL;
808 /* Remove duplications in conflict vectors of all allocnos. */
809 static void
810 compress_conflict_vecs (void)
812 ira_object_t obj;
813 ira_object_iterator oi;
815 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
816 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
817 curr_conflict_check_tick = 0;
818 FOR_EACH_OBJECT (obj, oi)
820 if (OBJECT_CONFLICT_VEC_P (obj))
821 compress_conflict_vec (obj);
823 ira_free (conflict_check);
826 /* This recursive function outputs allocno A and if it is a cap the
827 function outputs its members. */
828 void
829 ira_print_expanded_allocno (ira_allocno_t a)
831 basic_block bb;
833 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
834 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
835 fprintf (ira_dump_file, ",b%d", bb->index);
836 else
837 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
838 if (ALLOCNO_CAP_MEMBER (a) != NULL)
840 fprintf (ira_dump_file, ":");
841 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
843 fprintf (ira_dump_file, ")");
846 /* Create and return the cap representing allocno A in the
847 parent loop. */
848 static ira_allocno_t
849 create_cap_allocno (ira_allocno_t a)
851 ira_allocno_t cap;
852 ira_loop_tree_node_t parent;
853 enum reg_class cover_class;
855 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
856 && ALLOCNO_NEXT_COALESCED_ALLOCNO (a) == a);
857 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
858 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
859 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
860 cover_class = ALLOCNO_COVER_CLASS (a);
861 ira_set_allocno_cover_class (cap, cover_class);
862 ira_create_allocno_objects (cap);
863 ALLOCNO_AVAILABLE_REGS_NUM (cap) = ALLOCNO_AVAILABLE_REGS_NUM (a);
864 ALLOCNO_CAP_MEMBER (cap) = a;
865 ALLOCNO_CAP (a) = cap;
866 ALLOCNO_COVER_CLASS_COST (cap) = ALLOCNO_COVER_CLASS_COST (a);
867 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
868 ira_allocate_and_copy_costs
869 (&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
870 ira_allocate_and_copy_costs
871 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
872 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
873 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
874 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
875 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
876 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
878 merge_hard_reg_conflicts (a, cap, false);
880 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
881 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
883 fprintf (ira_dump_file, " Creating cap ");
884 ira_print_expanded_allocno (cap);
885 fprintf (ira_dump_file, "\n");
887 return cap;
890 /* Create and return a live range for OBJECT with given attributes. */
891 live_range_t
892 ira_create_live_range (ira_object_t obj, int start, int finish,
893 live_range_t next)
895 live_range_t p;
897 p = (live_range_t) pool_alloc (live_range_pool);
898 p->object = obj;
899 p->start = start;
900 p->finish = finish;
901 p->next = next;
902 return p;
905 /* Create a new live range for OBJECT and queue it at the head of its
906 live range list. */
907 void
908 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
910 live_range_t p;
911 p = ira_create_live_range (object, start, finish,
912 OBJECT_LIVE_RANGES (object));
913 OBJECT_LIVE_RANGES (object) = p;
916 /* Copy allocno live range R and return the result. */
917 static live_range_t
918 copy_live_range (live_range_t r)
920 live_range_t p;
922 p = (live_range_t) pool_alloc (live_range_pool);
923 *p = *r;
924 return p;
927 /* Copy allocno live range list given by its head R and return the
928 result. */
929 live_range_t
930 ira_copy_live_range_list (live_range_t r)
932 live_range_t p, first, last;
934 if (r == NULL)
935 return NULL;
936 for (first = last = NULL; r != NULL; r = r->next)
938 p = copy_live_range (r);
939 if (first == NULL)
940 first = p;
941 else
942 last->next = p;
943 last = p;
945 return first;
948 /* Merge ranges R1 and R2 and returns the result. The function
949 maintains the order of ranges and tries to minimize number of the
950 result ranges. */
951 live_range_t
952 ira_merge_live_ranges (live_range_t r1, live_range_t r2)
954 live_range_t first, last, temp;
956 if (r1 == NULL)
957 return r2;
958 if (r2 == NULL)
959 return r1;
960 for (first = last = NULL; r1 != NULL && r2 != NULL;)
962 if (r1->start < r2->start)
964 temp = r1;
965 r1 = r2;
966 r2 = temp;
968 if (r1->start <= r2->finish + 1)
970 /* Intersected ranges: merge r1 and r2 into r1. */
971 r1->start = r2->start;
972 if (r1->finish < r2->finish)
973 r1->finish = r2->finish;
974 temp = r2;
975 r2 = r2->next;
976 ira_finish_live_range (temp);
977 if (r2 == NULL)
979 /* To try to merge with subsequent ranges in r1. */
980 r2 = r1->next;
981 r1->next = NULL;
984 else
986 /* Add r1 to the result. */
987 if (first == NULL)
988 first = last = r1;
989 else
991 last->next = r1;
992 last = r1;
994 r1 = r1->next;
995 if (r1 == NULL)
997 /* To try to merge with subsequent ranges in r2. */
998 r1 = r2->next;
999 r2->next = NULL;
1003 if (r1 != NULL)
1005 if (first == NULL)
1006 first = r1;
1007 else
1008 last->next = r1;
1009 ira_assert (r1->next == NULL);
1011 else if (r2 != NULL)
1013 if (first == NULL)
1014 first = r2;
1015 else
1016 last->next = r2;
1017 ira_assert (r2->next == NULL);
1019 else
1021 ira_assert (last->next == NULL);
1023 return first;
1026 /* Return TRUE if live ranges R1 and R2 intersect. */
1027 bool
1028 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1030 /* Remember the live ranges are always kept ordered. */
1031 while (r1 != NULL && r2 != NULL)
1033 if (r1->start > r2->finish)
1034 r1 = r1->next;
1035 else if (r2->start > r1->finish)
1036 r2 = r2->next;
1037 else
1038 return true;
1040 return false;
/* Free allocno live range R.  The storage is returned to
   LIVE_RANGE_POOL, not to the general heap.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1050 /* Free list of allocno live ranges starting with R. */
1051 void
1052 ira_finish_live_range_list (live_range_t r)
1054 live_range_t next_r;
1056 for (; r != NULL; r = next_r)
1058 next_r = r->next;
1059 ira_finish_live_range (r);
/* Free updated register costs of allocno A.  Only the "updated"
   vectors are released; the base HARD_REG_COSTS vectors are left
   untouched (those are freed by finish_allocno).  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class cover_class;

  cover_class = ALLOCNO_COVER_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  cover_class);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A: its objects (with their
   live ranges and conflict arrays), its entry in the global allocno
   array, and all four cost vectors, then the allocno itself.  */
static void
finish_allocno (ira_allocno_t a)
{
  enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      /* Clear the id map slot so stale pointers cannot be followed.  */
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  cover_class);
  pool_free (allocno_pool, a);
}
/* Free the memory allocated for all allocnos, the regno->allocno
   map, the container vectors, and finally the allocation pools
   themselves.  Pool destruction must come last since finish_allocno
   returns elements to the pools.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  VEC_free (ira_object_t, heap, ira_object_id_map_vec);
  VEC_free (ira_allocno_t, heap, allocno_vec);
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
/* Pools for copies.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static VEC(ira_copy_t,heap) *copy_vec;

/* The function initializes data concerning allocno copies.  The
   vector is pre-sized by the insn-uid count as a heuristic upper
   bound on the number of copies.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any, or NULL.  Walks A1's copy list; each copy
   node is threaded on two lists (one per endpoint), so the next link
   depends on which endpoint A1 is.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  The new copy is registered
   in the global copy vector; IRA_COPIES/IRA_COPIES_NUM are refreshed
   because VEC_safe_push may reallocate the underlying array.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		 bool constraint_p, rtx insn,
		 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
  ira_copies = VEC_address (ira_copy_t, copy_vec);
  ira_copies_num = VEC_length (ira_copy_t, copy_vec);
  return cp;
}
/* Attach a copy CP to allocnos involved into the copy.  CP is pushed
   on the front of both endpoints' copy lists; the back links of the
   old heads are fixed up according to which endpoint role each head
   plays in its own copy.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
/* Detach a copy CP from allocnos involved into the copy.  The copy is
   unlinked from both endpoints' doubly-linked copy lists; the two
   halves below are symmetric (FIRST side, then SECOND side).  */
void
ira_remove_allocno_copy_from_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;
  ira_copy_t prev, next;

  /* Unlink from FIRST's list.  Neighbor link fields depend on which
     endpoint role the neighbor copy has for FIRST.  */
  next = cp->next_first_allocno_copy;
  prev = cp->prev_first_allocno_copy;
  if (prev == NULL)
    ALLOCNO_COPIES (first) = next;
  else if (prev->first == first)
    prev->next_first_allocno_copy = next;
  else
    prev->next_second_allocno_copy = next;
  if (next != NULL)
    {
      if (next->first == first)
	next->prev_first_allocno_copy = prev;
      else
	next->prev_second_allocno_copy = prev;
    }
  cp->prev_first_allocno_copy = cp->next_first_allocno_copy = NULL;

  /* Unlink from SECOND's list (mirror of the above).  */
  next = cp->next_second_allocno_copy;
  prev = cp->prev_second_allocno_copy;
  if (prev == NULL)
    ALLOCNO_COPIES (second) = next;
  else if (prev->second == second)
    prev->next_second_allocno_copy = next;
  else
    prev->next_first_allocno_copy = next;
  if (next != NULL)
    {
      if (next->second == second)
	next->prev_second_allocno_copy = prev;
      else
	next->prev_first_allocno_copy = prev;
    }
  cp->prev_second_allocno_copy = cp->next_second_allocno_copy = NULL;
}
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  The endpoint pointers
   and both pairs of list links are swapped together so the two
   per-endpoint lists stay consistent.  */
void
ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  ira_allocno_t temp;
  ira_copy_t temp_cp;

  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  temp = cp->first;
  cp->first = cp->second;
  cp->second = temp;

  temp_cp = cp->prev_first_allocno_copy;
  cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
  cp->prev_second_allocno_copy = temp_cp;

  temp_cp = cp->next_first_allocno_copy;
  cp->next_first_allocno_copy = cp->next_second_allocno_copy;
  cp->next_second_allocno_copy = temp_cp;
}
/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  A pre-existing identical copy just accumulates
   FREQ; otherwise a new copy is created, linked into both allocnos'
   lists and canonicalized.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		      bool constraint_p, rtx insn,
		      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
			loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  ira_add_allocno_copy_to_list (cp);
  ira_swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
/* Print info about copy CP into file F.  The trailing tag reports
   the copy's origin: a real move insn, an insn constraint, or a
   shuffle (no originating insn, not constraint-driven).  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
/* Print info about copy CP into stderr.  Debugger convenience
   wrapper around print_copy.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
/* Print info about all copies into stderr.  Debugger convenience
   wrapper around print_copies.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F.  For each
   copy, the "other" endpoint and the copy frequency are shown; the
   traversal picks the correct next link depending on which endpoint
   A is in the copy.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
/* Print info about copies involving allocno A into stderr.  Debugger
   convenience wrapper around print_allocno_copies.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP, returning it to
   the copy pool.  The copy must already be detached from its
   allocnos' lists by the caller if it was attached.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
/* Free memory allocated for all copies, then the container vector
   and the pool itself (pool last, since finish_copy feeds it).  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  VEC_free (ira_copy_t, heap, copy_vec);
  free_alloc_pool (copy_pool);
}
/* Pools for cost vectors.  It is defined only for cover classes.  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each cover class; the element size is
   one int per hard register in the class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class cover_class;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      cost_vector_pool[cover_class]
	= create_alloc_pool ("cost vectors",
			     sizeof (int)
			     * ira_class_hard_regs_num[cover_class],
			     100);
    }
}
/* Allocate and return a cost vector for COVER_CLASS.  The vector is
   uninitialized and sized for the class's hard registers.  */
int *
ira_allocate_cost_vector (enum reg_class cover_class)
{
  return (int *) pool_alloc (cost_vector_pool[cover_class]);
}
/* Free a cost vector VEC for COVER_CLASS.  VEC must have been
   obtained from the same class's pool; NULL is not accepted.  */
void
ira_free_cost_vector (int *vec, enum reg_class cover_class)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[cover_class], vec);
}
/* Finish work with hard register cost vectors.  Release allocation
   pool for each cover class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class cover_class;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      free_alloc_pool (cost_vector_pool[cover_class]);
    }
}
/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;

/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).  Note that a BB node receives both callbacks
   immediately, without recursion, since BBs have no children.  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    for (subloop_node = loop_node->children;
	 subloop_node != NULL;
	 subloop_node = subloop_node->next)
      if (subloop_node->bb != NULL)
	{
	  if (preorder_func != NULL)
	    (*preorder_func) (subloop_node);

	  if (postorder_func != NULL)
	    (*postorder_func) (subloop_node);
	}

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* Recursion may have changed the current node; restore it before
     the postorder callback of this node runs.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1520 /* The basic block currently being processed. */
1521 static basic_block curr_bb;
1523 /* This recursive function creates allocnos corresponding to
1524 pseudo-registers containing in X. True OUTPUT_P means that X is
1525 a lvalue. */
1526 static void
1527 create_insn_allocnos (rtx x, bool output_p)
1529 int i, j;
1530 const char *fmt;
1531 enum rtx_code code = GET_CODE (x);
1533 if (code == REG)
1535 int regno;
1537 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1539 ira_allocno_t a;
1541 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1542 a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1544 ALLOCNO_NREFS (a)++;
1545 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1546 if (output_p)
1547 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1549 return;
1551 else if (code == SET)
1553 create_insn_allocnos (SET_DEST (x), true);
1554 create_insn_allocnos (SET_SRC (x), false);
1555 return;
1557 else if (code == CLOBBER)
1559 create_insn_allocnos (XEXP (x, 0), true);
1560 return;
1562 else if (code == MEM)
1564 create_insn_allocnos (XEXP (x, 0), false);
1565 return;
1567 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1568 code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1570 create_insn_allocnos (XEXP (x, 0), true);
1571 create_insn_allocnos (XEXP (x, 0), false);
1572 return;
1575 fmt = GET_RTX_FORMAT (code);
1576 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1578 if (fmt[i] == 'e')
1579 create_insn_allocnos (XEXP (x, i), output_p);
1580 else if (fmt[i] == 'E')
1581 for (j = 0; j < XVECLEN (x, i); j++)
1582 create_insn_allocnos (XVECEXP (x, i, j), output_p);
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  Besides insn references, pseudos merely live-through the
   block (in DF_LR_IN) also get allocnos.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  /* Reverse order matters for linking allocnos by next_regno_allocno
     (see the comment in create_allocnos).  */
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be a allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo is live on the edge when it is both live
   out of E->src and live into E->dest.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = DF_LR_IN (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  For a non-root loop
   node, allocnos are created on entry edges (excluding the latch
   back-edge) and on all exit edges.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      VEC (edge, heap) *edges;

      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      for (i = 0; VEC_iterate (edge, edges, i, e); i++)
	create_loop_allocnos (e);
      VEC_free (edge, heap, edges);
    }
}
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  Intended as a postorder
   callback for ira_traverse_loop_tree, so children are handled
   before parents and the sets accumulate bottom-up.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
		   loop_tree_node->modified_regnos);
}
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  Only meaningful for regional allocation (-fira-region=all
   or mixed); otherwise there is nothing to propagate.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class cover_class;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  cover_class = ALLOCNO_COVER_CLASS (a);
	  ira_assert (cover_class == ALLOCNO_COVER_CLASS (parent_a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), cover_class,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     cover_class,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_COVER_CLASS_COST (parent_a)
	    += ALLOCNO_COVER_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
1747 /* The page contains function to remove some regions from a separate
1748 register allocation. We remove regions whose separate allocation
1749 will hardly improve the result. As a result we speed up regional
1750 register allocation. */
1752 /* The function changes the object in range list given by R to OBJ. */
1753 static void
1754 change_object_in_range_list (live_range_t r, ira_object_t obj)
1756 for (; r != NULL; r = r->next)
1757 r->object = obj;
/* Move all live ranges associated with allocno FROM to allocno TO.
   The ranges are re-owned by TO's objects, merged with whatever TO
   already holds, and detached from FROM.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   "      Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.
   Unlike move_allocno_live_ranges, FROM keeps its own ranges: TO
   gets a deep copy merged into its existing range lists.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
1818 /* Return TRUE if NODE represents a loop with low register
1819 pressure. */
1820 static bool
1821 low_pressure_loop_node_p (ira_loop_tree_node_t node)
1823 int i;
1824 enum reg_class cover_class;
1826 if (node->bb != NULL)
1827 return false;
1829 for (i = 0; i < ira_reg_class_cover_size; i++)
1831 cover_class = ira_reg_class_cover[i];
1832 if (node->reg_pressure[cover_class]
1833 > ira_available_class_regs[cover_class])
1834 return false;
1836 return true;
/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  qsort comparator; must never be handed the root (both
   arguments are asserted to have a parent).  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop->num - l2->loop->num;
}
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * VEC_length (loop_p,
							   ira_loops.larray));
  for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = (low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	     && low_pressure_loop_node_p (&ira_loop_nodes[i]));
      }
  /* After sorting, already-marked and cheapest loops come first, so
     trimming the front enforces the IRA_MAX_LOOPS_NUM limit.  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop->num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
/* Mark all loops but root for removing.  Used when regional
   allocation is disabled and only one region is wanted.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  for (i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop->num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */
DEF_VEC_P(ira_loop_tree_node_t);
DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);

/* Vec containing references to all removed loop tree nodes.  */
static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.
   Used as a scratch stack by the removal pass below.  */
static VEC(ira_loop_tree_node_t,heap) *children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  Surviving descendants of removed subloops are
   collected on CHILDREN_VEC and re-parented to the nearest surviving
   ancestor; removed nodes are stashed on REMOVED_LOOP_VEC.  (Note:
   the function name's "uneccesary" typo is historical; renaming
   would require touching the caller outside this chunk.)  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
  start = VEC_length (ira_loop_tree_node_t, children_vec);
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
      return;
    }
  /* Rebuild this node's child and subloop lists from the entries the
     recursion pushed above START.  */
  while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
    {
      subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
1989 /* Return TRUE if NODE is inside PARENT. */
1990 static bool
1991 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
1993 for (node = node->parent; node != NULL; node = node->parent)
1994 if (node == parent)
1995 return true;
1996 return false;
/* Sort allocnos according to their order in regno allocno list:
   allocnos from inner loops come first.  qsort comparator.  */
static int
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);

  if (loop_is_inside_p (n1, n2))
    return -1;
  else if (loop_is_inside_p (n2, n1))
    return 1;
  /* If allocnos are equally good, sort by allocno numbers, so that
     the results of qsort leave nothing to chance.  We put allocnos
     with higher number first in the list because it is the original
     order for allocnos from loops on the same levels.  */
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  Allocated lazily by the caller
   (remove_unnecessary_allocnos).  */
static ira_allocno_t *regno_allocnos;

/* Restore allocno order for REGNO in the regno allocno list: collect
   the chain into REGNO_ALLOCNOS, sort it inner-loops-first, and
   relink the chain in sorted order.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
/* Propagate info from allocno FROM_A to allocno A: conflicts,
   reference/frequency counters, call data, and accumulated cost
   vectors.  Both allocnos must have the same cover class.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class cover_class;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* BAD_SPILL_P is sticky-false: one "good" contributor clears it.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  cover_class = ALLOCNO_COVER_CLASS (from_a);
  ira_assert (cover_class == ALLOCNO_COVER_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     cover_class,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_COVER_CLASS_COST (a) += ALLOCNO_COVER_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  Each allocno in a removed region is either hoisted
   to the nearest surviving ancestor region (when no allocno for the
   same regno exists there) or merged into the ancestor's allocno and
   destroyed.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest surviving ancestor, or one that
		 already has an allocno for this regno.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	{
	  /* We need to restore the order in regno allocno list.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2147 /* Remove allocnos from all loops but the root. */
2148 static void
2149 remove_low_level_allocnos (void)
2151 int regno;
2152 bool merged_p, propagate_p;
2153 ira_allocno_t a, top_a;
2154 ira_loop_tree_node_t a_node, parent;
2155 ira_allocno_iterator ai;
2157 merged_p = false;
2158 FOR_EACH_ALLOCNO (a, ai)
2160 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2161 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2162 continue;
2163 regno = ALLOCNO_REGNO (a);
2164 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2166 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2167 ira_loop_tree_root->regno_allocno_map[regno] = a;
2168 continue;
2170 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2171 /* Remove the allocno and update info of allocno in the upper
2172 region. */
2173 move_allocno_live_ranges (a, top_a);
2174 merged_p = true;
2175 if (propagate_p)
2176 propagate_some_info_from_allocno (top_a, a);
2178 FOR_EACH_ALLOCNO (a, ai)
2180 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2181 if (a_node == ira_loop_tree_root)
2182 continue;
2183 parent = a_node->parent;
2184 regno = ALLOCNO_REGNO (a);
2185 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2186 ira_assert (ALLOCNO_CAP (a) != NULL);
2187 else if (ALLOCNO_CAP (a) == NULL)
2188 ira_assert (parent->regno_allocno_map[regno] != NULL);
2190 FOR_EACH_ALLOCNO (a, ai)
2192 regno = ALLOCNO_REGNO (a);
2193 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2195 ira_object_t obj;
2196 ira_allocno_object_iterator oi;
2198 ira_regno_allocno_map[regno] = a;
2199 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2200 ALLOCNO_CAP_MEMBER (a) = NULL;
2201 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2202 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2203 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2204 #ifdef STACK_REGS
2205 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2206 ALLOCNO_NO_STACK_REG_P (a) = true;
2207 #endif
2209 else
2210 finish_allocno (a);
2212 if (merged_p)
2213 ira_rebuild_start_finish_chains ();
2216 /* Remove loops from consideration. We remove all loops except for
2217 root if ALL_P or loops for which a separate allocation will not
2218 improve the result. We have to do this after allocno creation and
2219 their costs and cover class evaluation because only after that the
2220 register pressure can be known and is calculated. */
2221 static void
2222 remove_unnecessary_regions (bool all_p)
2224 if (all_p)
2225 mark_all_loops_for_removal ();
2226 else
2227 mark_loops_for_removal ();
2228 children_vec
2229 = VEC_alloc (ira_loop_tree_node_t, heap,
2230 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2231 removed_loop_vec
2232 = VEC_alloc (ira_loop_tree_node_t, heap,
2233 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2234 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
2235 VEC_free (ira_loop_tree_node_t, heap, children_vec);
2236 if (all_p)
2237 remove_low_level_allocnos ();
2238 else
2239 remove_unnecessary_allocnos ();
2240 while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
2241 finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
2242 VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
2247 /* At this point true value of allocno attribute bad_spill_p means
2248 that there is an insn where allocno occurs and where the allocno
2249 can not be used as memory. The function updates the attribute, now
2250 it can be true only for allocnos which can not be used as memory in
2251 an insn and in whose live ranges there is other allocno deaths.
2252 Spilling allocnos with true value will not improve the code because
2253 it will not make other allocnos colorable and additional reloads
2254 for the corresponding pseudo will be generated in reload pass for
2255 each insn it occurs.
2257 This is a trick mentioned in one classic article of Chaitin etc
2258 which is frequently omitted in other implementations of RA based on
2259 graph coloring. */
2260 static void
2261 update_bad_spill_attribute (void)
2263 int i;
2264 ira_allocno_t a;
2265 ira_allocno_iterator ai;
2266 ira_allocno_object_iterator aoi;
2267 ira_object_t obj;
2268 live_range_t r;
2269 enum reg_class cover_class;
2270 bitmap_head dead_points[N_REG_CLASSES];
2272 for (i = 0; i < ira_reg_class_cover_size; i++)
2274 cover_class = ira_reg_class_cover[i];
2275 bitmap_initialize (&dead_points[cover_class], &reg_obstack);
2277 FOR_EACH_ALLOCNO (a, ai)
2279 cover_class = ALLOCNO_COVER_CLASS (a);
2280 if (cover_class == NO_REGS)
2281 continue;
2282 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2283 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2284 bitmap_set_bit (&dead_points[cover_class], r->finish);
2286 FOR_EACH_ALLOCNO (a, ai)
2288 cover_class = ALLOCNO_COVER_CLASS (a);
2289 if (cover_class == NO_REGS)
2290 continue;
2291 if (! ALLOCNO_BAD_SPILL_P (a))
2292 continue;
2293 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2295 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2297 for (i = r->start + 1; i < r->finish; i++)
2298 if (bitmap_bit_p (&dead_points[cover_class], i))
2299 break;
2300 if (i < r->finish)
2301 break;
2303 if (r != NULL)
2305 ALLOCNO_BAD_SPILL_P (a) = false;
2306 break;
2310 for (i = 0; i < ira_reg_class_cover_size; i++)
2312 cover_class = ira_reg_class_cover[i];
2313 bitmap_clear (&dead_points[cover_class]);
2319 /* Set up minimal and maximal live range points for allocnos. */
2320 static void
2321 setup_min_max_allocno_live_range_point (void)
2323 int i;
2324 ira_allocno_t a, parent_a, cap;
2325 ira_allocno_iterator ai;
2326 #ifdef ENABLE_IRA_CHECKING
2327 ira_object_iterator oi;
2328 ira_object_t obj;
2329 #endif
2330 live_range_t r;
2331 ira_loop_tree_node_t parent;
2333 FOR_EACH_ALLOCNO (a, ai)
2335 int n = ALLOCNO_NUM_OBJECTS (a);
2336 for (i = 0; i < n; i++)
2338 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2339 r = OBJECT_LIVE_RANGES (obj);
2340 if (r == NULL)
2341 continue;
2342 OBJECT_MAX (obj) = r->finish;
2343 for (; r->next != NULL; r = r->next)
2345 OBJECT_MIN (obj) = r->start;
2348 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2349 for (a = ira_regno_allocno_map[i];
2350 a != NULL;
2351 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2353 int j;
2354 int n = ALLOCNO_NUM_OBJECTS (a);
2355 for (j = 0; j < n; j++)
2357 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2358 ira_object_t parent_obj;
2360 if (OBJECT_MAX (obj) < 0)
2361 continue;
2362 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2363 /* Accumulation of range info. */
2364 if (ALLOCNO_CAP (a) != NULL)
2366 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2368 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2369 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2370 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2371 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2372 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2374 continue;
2376 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2377 continue;
2378 parent_a = parent->regno_allocno_map[i];
2379 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2380 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2381 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2382 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2383 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2386 #ifdef ENABLE_IRA_CHECKING
2387 FOR_EACH_OBJECT (obj, oi)
2389 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2390 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
2391 continue;
2392 gcc_unreachable ();
2394 #endif
2397 /* Sort allocnos according to their live ranges. Allocnos with
2398 smaller cover class are put first unless we use priority coloring.
2399 Allocnos with the same cover class are ordered according their start
2400 (min). Allocnos with the same start are ordered according their
2401 finish (max). */
2402 static int
2403 object_range_compare_func (const void *v1p, const void *v2p)
2405 int diff;
2406 ira_object_t obj1 = *(const ira_object_t *) v1p;
2407 ira_object_t obj2 = *(const ira_object_t *) v2p;
2408 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2409 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2411 if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2412 && (diff = ALLOCNO_COVER_CLASS (a1) - ALLOCNO_COVER_CLASS (a2)) != 0)
2413 return diff;
2414 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2415 return diff;
2416 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2417 return diff;
2418 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2421 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2422 static void
2423 sort_conflict_id_map (void)
2425 int i, num;
2426 ira_allocno_t a;
2427 ira_allocno_iterator ai;
2429 num = 0;
2430 FOR_EACH_ALLOCNO (a, ai)
2432 ira_allocno_object_iterator oi;
2433 ira_object_t obj;
2435 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2436 ira_object_id_map[num++] = obj;
2438 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2439 object_range_compare_func);
2440 for (i = 0; i < num; i++)
2442 ira_object_t obj = ira_object_id_map[i];
2443 gcc_assert (obj != NULL);
2444 OBJECT_CONFLICT_ID (obj) = i;
2446 for (i = num; i < ira_objects_num; i++)
2447 ira_object_id_map[i] = NULL;
2450 /* Set up minimal and maximal conflict ids of allocnos with which
2451 given allocno can conflict. */
2452 static void
2453 setup_min_max_conflict_allocno_ids (void)
2455 int cover_class;
2456 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2457 int *live_range_min, *last_lived;
2458 int word0_min, word0_max;
2459 ira_allocno_t a;
2460 ira_allocno_iterator ai;
2462 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2463 cover_class = -1;
2464 first_not_finished = -1;
2465 for (i = 0; i < ira_objects_num; i++)
2467 ira_object_t obj = ira_object_id_map[i];
2468 if (obj == NULL)
2469 continue;
2471 a = OBJECT_ALLOCNO (obj);
2473 if (cover_class < 0
2474 || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2475 && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
2477 cover_class = ALLOCNO_COVER_CLASS (a);
2478 min = i;
2479 first_not_finished = i;
2481 else
2483 start = OBJECT_MIN (obj);
2484 /* If we skip an allocno, the allocno with smaller ids will
2485 be also skipped because of the secondary sorting the
2486 range finishes (see function
2487 object_range_compare_func). */
2488 while (first_not_finished < i
2489 && start > OBJECT_MAX (ira_object_id_map
2490 [first_not_finished]))
2491 first_not_finished++;
2492 min = first_not_finished;
2494 if (min == i)
2495 /* We could increase min further in this case but it is good
2496 enough. */
2497 min++;
2498 live_range_min[i] = OBJECT_MIN (obj);
2499 OBJECT_MIN (obj) = min;
2501 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2502 cover_class = -1;
2503 filled_area_start = -1;
2504 for (i = ira_objects_num - 1; i >= 0; i--)
2506 ira_object_t obj = ira_object_id_map[i];
2507 if (obj == NULL)
2508 continue;
2510 a = OBJECT_ALLOCNO (obj);
2511 if (cover_class < 0
2512 || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2513 && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
2515 cover_class = ALLOCNO_COVER_CLASS (a);
2516 for (j = 0; j < ira_max_point; j++)
2517 last_lived[j] = -1;
2518 filled_area_start = ira_max_point;
2520 min = live_range_min[i];
2521 finish = OBJECT_MAX (obj);
2522 max = last_lived[finish];
2523 if (max < 0)
2524 /* We could decrease max further in this case but it is good
2525 enough. */
2526 max = OBJECT_CONFLICT_ID (obj) - 1;
2527 OBJECT_MAX (obj) = max;
2528 /* In filling, we can go further A range finish to recognize
2529 intersection quickly because if the finish of subsequently
2530 processed allocno (it has smaller conflict id) range is
2531 further A range finish than they are definitely intersected
2532 (the reason for this is the allocnos with bigger conflict id
2533 have their range starts not smaller than allocnos with
2534 smaller ids. */
2535 for (j = min; j < filled_area_start; j++)
2536 last_lived[j] = i;
2537 filled_area_start = min;
2539 ira_free (last_lived);
2540 ira_free (live_range_min);
2542 /* For allocnos with more than one object, we may later record extra conflicts in
2543 subobject 0 that we cannot really know about here.
2544 For now, simply widen the min/max range of these subobjects. */
2546 word0_min = INT_MAX;
2547 word0_max = INT_MIN;
2549 FOR_EACH_ALLOCNO (a, ai)
2551 int n = ALLOCNO_NUM_OBJECTS (a);
2552 ira_object_t obj0;
2553 if (n < 2)
2554 continue;
2555 obj0 = ALLOCNO_OBJECT (a, 0);
2556 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2557 word0_min = OBJECT_CONFLICT_ID (obj0);
2558 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2559 word0_max = OBJECT_CONFLICT_ID (obj0);
2561 FOR_EACH_ALLOCNO (a, ai)
2563 int n = ALLOCNO_NUM_OBJECTS (a);
2564 ira_object_t obj0;
2565 if (n < 2)
2566 continue;
2567 obj0 = ALLOCNO_OBJECT (a, 0);
2568 if (OBJECT_MIN (obj0) > word0_min)
2569 OBJECT_MIN (obj0) = word0_min;
2570 if (OBJECT_MAX (obj0) < word0_max)
2571 OBJECT_MAX (obj0) = word0_max;
2577 static void
2578 create_caps (void)
2580 ira_allocno_t a;
2581 ira_allocno_iterator ai;
2582 ira_loop_tree_node_t loop_tree_node;
2584 FOR_EACH_ALLOCNO (a, ai)
2586 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2587 continue;
2588 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2589 create_cap_allocno (a);
2590 else if (ALLOCNO_CAP (a) == NULL)
2592 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2593 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2594 create_cap_allocno (a);
2601 /* The page contains code transforming more one region internal
2602 representation (IR) to one region IR which is necessary for reload.
2603 This transformation is called IR flattening. We might just rebuild
2604 the IR for one region but we don't do it because it takes a lot of
2605 time. */
2607 /* Map: regno -> allocnos which will finally represent the regno for
2608 IR with one region. */
2609 static ira_allocno_t *regno_top_level_allocno_map;
2611 /* Find the allocno that corresponds to A at a level one higher up in the
2612 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2613 ira_allocno_t
2614 ira_parent_allocno (ira_allocno_t a)
2616 ira_loop_tree_node_t parent;
2618 if (ALLOCNO_CAP (a) != NULL)
2619 return NULL;
2621 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2622 if (parent == NULL)
2623 return NULL;
2625 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2628 /* Find the allocno that corresponds to A at a level one higher up in the
2629 loop tree. If ALLOCNO_CAP is set for A, return that. */
2630 ira_allocno_t
2631 ira_parent_or_cap_allocno (ira_allocno_t a)
2633 if (ALLOCNO_CAP (a) != NULL)
2634 return ALLOCNO_CAP (a);
2636 return ira_parent_allocno (a);
2639 /* Process all allocnos originated from pseudo REGNO and copy live
2640 ranges, hard reg conflicts, and allocno stack reg attributes from
2641 low level allocnos to final allocnos which are destinations of
2642 removed stores at a loop exit. Return true if we copied live
2643 ranges. */
2644 static bool
2645 copy_info_to_removed_store_destinations (int regno)
2647 ira_allocno_t a;
2648 ira_allocno_t parent_a = NULL;
2649 ira_loop_tree_node_t parent;
2650 bool merged_p;
2652 merged_p = false;
2653 for (a = ira_regno_allocno_map[regno];
2654 a != NULL;
2655 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2657 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))])
2658 /* This allocno will be removed. */
2659 continue;
2661 /* Caps will be removed. */
2662 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2663 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2664 parent != NULL;
2665 parent = parent->parent)
2666 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
2667 || (parent_a == regno_top_level_allocno_map[REGNO (ALLOCNO_REG
2668 (parent_a))]
2669 && ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)))
2670 break;
2671 if (parent == NULL || parent_a == NULL)
2672 continue;
2674 copy_allocno_live_ranges (a, parent_a);
2675 merge_hard_reg_conflicts (a, parent_a, true);
2677 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2678 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2679 += ALLOCNO_CALLS_CROSSED_NUM (a);
2680 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2681 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2682 merged_p = true;
2684 return merged_p;
2687 /* Flatten the IR. In other words, this function transforms IR as if
2688 it were built with one region (without loops). We could make it
2689 much simpler by rebuilding IR with one region, but unfortunately it
2690 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
2691 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
2692 IRA_MAX_POINT before emitting insns on the loop borders. */
2693 void
2694 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
2696 int i, j;
2697 bool keep_p;
2698 int hard_regs_num;
2699 bool new_pseudos_p, merged_p, mem_dest_p;
2700 unsigned int n;
2701 enum reg_class cover_class;
2702 ira_allocno_t a, parent_a, first, second, node_first, node_second;
2703 ira_copy_t cp;
2704 ira_loop_tree_node_t node;
2705 live_range_t r;
2706 ira_allocno_iterator ai;
2707 ira_copy_iterator ci;
2709 regno_top_level_allocno_map
2710 = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
2711 memset (regno_top_level_allocno_map, 0,
2712 max_reg_num () * sizeof (ira_allocno_t));
2713 new_pseudos_p = merged_p = false;
2714 FOR_EACH_ALLOCNO (a, ai)
2716 ira_allocno_object_iterator oi;
2717 ira_object_t obj;
2718 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2719 /* Caps are not in the regno allocno maps and they are never
2720 will be transformed into allocnos existing after IR
2721 flattening. */
2722 continue;
2723 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2724 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2725 OBJECT_CONFLICT_HARD_REGS (obj));
2726 #ifdef STACK_REGS
2727 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
2728 #endif
2730 /* Fix final allocno attributes. */
2731 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2733 mem_dest_p = false;
2734 for (a = ira_regno_allocno_map[i];
2735 a != NULL;
2736 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2738 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2739 if (ALLOCNO_SOMEWHERE_RENAMED_P (a))
2740 new_pseudos_p = true;
2741 parent_a = ira_parent_allocno (a);
2742 if (parent_a == NULL)
2744 ALLOCNO_COPIES (a) = NULL;
2745 regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
2746 continue;
2748 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
2750 if (ALLOCNO_MEM_OPTIMIZED_DEST (a) != NULL)
2751 mem_dest_p = true;
2752 if (REGNO (ALLOCNO_REG (a)) == REGNO (ALLOCNO_REG (parent_a)))
2754 merge_hard_reg_conflicts (a, parent_a, true);
2755 move_allocno_live_ranges (a, parent_a);
2756 merged_p = true;
2757 ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
2758 = (ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
2759 || ALLOCNO_MEM_OPTIMIZED_DEST_P (a));
2760 continue;
2762 new_pseudos_p = true;
2763 for (;;)
2765 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
2766 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
2767 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
2768 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2769 -= ALLOCNO_CALLS_CROSSED_NUM (a);
2770 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2771 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2772 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
2773 && ALLOCNO_NREFS (parent_a) >= 0
2774 && ALLOCNO_FREQ (parent_a) >= 0);
2775 cover_class = ALLOCNO_COVER_CLASS (parent_a);
2776 hard_regs_num = ira_class_hard_regs_num[cover_class];
2777 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
2778 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
2779 for (j = 0; j < hard_regs_num; j++)
2780 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
2781 -= ALLOCNO_HARD_REG_COSTS (a)[j];
2782 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
2783 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
2784 for (j = 0; j < hard_regs_num; j++)
2785 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
2786 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
2787 ALLOCNO_COVER_CLASS_COST (parent_a)
2788 -= ALLOCNO_COVER_CLASS_COST (a);
2789 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
2790 parent_a = ira_parent_allocno (parent_a);
2791 if (parent_a == NULL)
2792 break;
2794 ALLOCNO_COPIES (a) = NULL;
2795 regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
2797 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
2798 merged_p = true;
2800 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
2801 if (merged_p || ira_max_point_before_emit != ira_max_point)
2802 ira_rebuild_start_finish_chains ();
2803 if (new_pseudos_p)
2805 sparseset objects_live;
2807 /* Rebuild conflicts. */
2808 FOR_EACH_ALLOCNO (a, ai)
2810 ira_allocno_object_iterator oi;
2811 ira_object_t obj;
2812 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2813 || ALLOCNO_CAP_MEMBER (a) != NULL)
2814 continue;
2815 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2817 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2818 ira_assert (r->object == obj);
2819 clear_conflicts (obj);
2822 objects_live = sparseset_alloc (ira_objects_num);
2823 for (i = 0; i < ira_max_point; i++)
2825 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
2827 ira_object_t obj = r->object;
2828 a = OBJECT_ALLOCNO (obj);
2829 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2830 || ALLOCNO_CAP_MEMBER (a) != NULL)
2831 continue;
2833 cover_class = ALLOCNO_COVER_CLASS (a);
2834 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
2835 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
2837 ira_object_t live_obj = ira_object_id_map[n];
2838 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
2839 enum reg_class live_cover = ALLOCNO_COVER_CLASS (live_a);
2840 if (ira_reg_classes_intersect_p[cover_class][live_cover]
2841 /* Don't set up conflict for the allocno with itself. */
2842 && live_a != a)
2843 ira_add_conflict (obj, live_obj);
2847 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
2848 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
2850 sparseset_free (objects_live);
2851 compress_conflict_vecs ();
2853 /* Mark some copies for removing and change allocnos in the rest
2854 copies. */
2855 FOR_EACH_COPY (cp, ci)
2857 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
2858 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
2860 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2861 fprintf
2862 (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
2863 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
2864 ALLOCNO_NUM (cp->first), REGNO (ALLOCNO_REG (cp->first)),
2865 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
2866 ALLOCNO_NUM (cp->second), REGNO (ALLOCNO_REG (cp->second)));
2867 cp->loop_tree_node = NULL;
2868 continue;
2870 first = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->first))];
2871 second = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->second))];
2872 node = cp->loop_tree_node;
2873 if (node == NULL)
2874 keep_p = true; /* It copy generated in ira-emit.c. */
2875 else
2877 /* Check that the copy was not propagated from level on
2878 which we will have different pseudos. */
2879 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
2880 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
2881 keep_p = ((REGNO (ALLOCNO_REG (first))
2882 == REGNO (ALLOCNO_REG (node_first)))
2883 && (REGNO (ALLOCNO_REG (second))
2884 == REGNO (ALLOCNO_REG (node_second))));
2886 if (keep_p)
2888 cp->loop_tree_node = ira_loop_tree_root;
2889 cp->first = first;
2890 cp->second = second;
2892 else
2894 cp->loop_tree_node = NULL;
2895 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2896 fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
2897 cp->num, ALLOCNO_NUM (cp->first),
2898 REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
2899 REGNO (ALLOCNO_REG (cp->second)));
2902 /* Remove unnecessary allocnos on lower levels of the loop tree. */
2903 FOR_EACH_ALLOCNO (a, ai)
2905 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2906 || ALLOCNO_CAP_MEMBER (a) != NULL)
2908 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2909 fprintf (ira_dump_file, " Remove a%dr%d\n",
2910 ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
2911 finish_allocno (a);
2912 continue;
2914 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2915 ALLOCNO_REGNO (a) = REGNO (ALLOCNO_REG (a));
2916 ALLOCNO_CAP (a) = NULL;
2917 /* Restore updated costs for assignments from reload. */
2918 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
2919 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
2920 if (! ALLOCNO_ASSIGNED_P (a))
2921 ira_free_allocno_updated_costs (a);
2922 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2923 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2925 /* Remove unnecessary copies. */
2926 FOR_EACH_COPY (cp, ci)
2928 if (cp->loop_tree_node == NULL)
2930 ira_copies[cp->num] = NULL;
2931 finish_copy (cp);
2932 continue;
2934 ira_assert
2935 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
2936 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
2937 ira_add_allocno_copy_to_list (cp);
2938 ira_swap_allocno_copy_ends_if_necessary (cp);
2940 rebuild_regno_allocno_maps ();
2941 if (ira_max_point != ira_max_point_before_emit)
2942 ira_compress_allocno_live_ranges ();
2943 ira_free (regno_top_level_allocno_map);
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
2976 /* Identify allocnos which prefer a register class with a single hard register.
2977 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
2978 less likely to use the preferred singleton register. */
2979 static void
2980 update_conflict_hard_reg_costs (void)
2982 ira_allocno_t a;
2983 ira_allocno_iterator ai;
2984 int i, index, min;
2986 FOR_EACH_ALLOCNO (a, ai)
2988 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
2989 enum reg_class pref = reg_preferred_class (ALLOCNO_REGNO (a));
2991 if (reg_class_size[pref] != 1)
2992 continue;
2993 index = (ira_class_hard_reg_index[cover_class]
2994 [ira_class_hard_regs[pref][0]]);
2995 if (index < 0)
2996 continue;
2997 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
2998 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
2999 continue;
3000 min = INT_MAX;
3001 for (i = ira_class_hard_regs_num[cover_class] - 1; i >= 0; i--)
3002 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_COVER_CLASS_COST (a)
3003 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3004 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3005 if (min == INT_MAX)
3006 continue;
3007 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3008 cover_class, 0);
3009 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3010 -= min - ALLOCNO_COVER_CLASS_COST (a);
3014 /* Create a internal representation (IR) for IRA (allocnos, copies,
3015 loop tree nodes). If LOOPS_P is FALSE the nodes corresponding to
3016 the loops (except the root which corresponds the all function) and
3017 correspondingly allocnos for the loops will be not created. Such
3018 parameter value is used for Chaitin-Briggs coloring. The function
3019 returns TRUE if we generate loop structure (besides nodes
3020 representing all function and the basic blocks) for regional
3021 allocation. A true return means that we really need to flatten IR
3022 before the reload. */
3023 bool
3024 ira_build (bool loops_p)
3026 df_analyze ();
3028 initiate_cost_vectors ();
3029 initiate_allocnos ();
3030 initiate_copies ();
3031 create_loop_tree_nodes (loops_p);
3032 form_loop_tree ();
3033 create_allocnos ();
3034 ira_costs ();
3035 create_allocno_objects ();
3036 ira_create_allocno_live_ranges ();
3037 remove_unnecessary_regions (false);
3038 ira_compress_allocno_live_ranges ();
3039 update_bad_spill_attribute ();
3040 loops_p = more_one_region_p ();
3041 if (loops_p)
3043 propagate_allocno_info ();
3044 create_caps ();
3046 ira_tune_allocno_costs_and_cover_classes ();
3047 #ifdef ENABLE_IRA_CHECKING
3048 check_allocno_creation ();
3049 #endif
3050 setup_min_max_allocno_live_range_point ();
3051 sort_conflict_id_map ();
3052 setup_min_max_conflict_allocno_ids ();
3053 ira_build_conflicts ();
3054 update_conflict_hard_reg_costs ();
3055 if (! ira_conflicts_p)
3057 ira_allocno_t a;
3058 ira_allocno_iterator ai;
3060 /* Remove all regions but root one. */
3061 if (loops_p)
3063 remove_unnecessary_regions (true);
3064 loops_p = false;
3066 /* We don't save hard registers around calls for fast allocation
3067 -- add caller clobbered registers as conflicting ones to
3068 allocno crossing calls. */
3069 FOR_EACH_ALLOCNO (a, ai)
3070 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3071 ior_hard_reg_conflicts (a, &call_used_reg_set);
3073 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3074 print_copies (ira_dump_file);
3075 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3077 int n, nr, nr_big;
3078 ira_allocno_t a;
3079 live_range_t r;
3080 ira_allocno_iterator ai;
3082 n = 0;
3083 nr = 0;
3084 nr_big = 0;
3085 FOR_EACH_ALLOCNO (a, ai)
3087 int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3088 if (nobj > 1)
3089 nr_big++;
3090 for (j = 0; j < nobj; j++)
3092 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3093 n += OBJECT_NUM_CONFLICTS (obj);
3094 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3095 nr++;
3098 fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
3099 VEC_length (loop_p, ira_loops.larray), n_basic_blocks,
3100 ira_max_point);
3101 fprintf (ira_dump_file,
3102 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3103 ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3105 return loops_p;
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}