/* Source: official-gcc.git, gcc/ira-build.c
   (blob b3c1e14f8f756ca1b04a4d9859f15219479cfbca,
   as of commit "PR 47802 Use strftime for CTIME and FDATE intrinsics").  */
/* Building internal representation for IRA.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "hard-reg-set.h"
32 #include "basic-block.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "params.h"
37 #include "df.h"
38 #include "output.h"
39 #include "reload.h"
40 #include "sparseset.h"
41 #include "ira-int.h"
42 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
44 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
45 ira_loop_tree_node_t);
47 /* The root of the loop tree corresponding to the all function. */
48 ira_loop_tree_node_t ira_loop_tree_root;
50 /* Height of the loop tree. */
51 int ira_loop_tree_height;
53 /* All nodes representing basic blocks are referred through the
54 following array. We can not use basic block member `aux' for this
55 because it is used for insertion of insns on edges. */
56 ira_loop_tree_node_t ira_bb_nodes;
58 /* All nodes representing loops are referred through the following
59 array. */
60 ira_loop_tree_node_t ira_loop_nodes;
62 /* Map regno -> allocnos with given regno (see comments for
63 allocno member `next_regno_allocno'). */
64 ira_allocno_t *ira_regno_allocno_map;
66 /* Array of references to all allocnos. The order number of the
67 allocno corresponds to the index in the array. Removed allocnos
68 have NULL element value. */
69 ira_allocno_t *ira_allocnos;
71 /* Sizes of the previous array. */
72 int ira_allocnos_num;
74 /* Count of conflict record structures we've created, used when creating
75 a new conflict id. */
76 int ira_objects_num;
78 /* Map a conflict id to its conflict record. */
79 ira_object_t *ira_object_id_map;
81 /* Array of references to all copies. The order number of the copy
82 corresponds to the index in the array. Removed copies have NULL
83 element value. */
84 ira_copy_t *ira_copies;
86 /* Size of the previous array. */
87 int ira_copies_num;
91 /* LAST_BASIC_BLOCK before generating additional insns because of live
92 range splitting. Emitting insns on a critical edge creates a new
93 basic block. */
94 static int last_basic_block_before_change;
96 /* The following function allocates the loop tree nodes. If LOOPS_P
97 is FALSE, the nodes corresponding to the loops (except the root
98 which corresponds the all function) will be not allocated but nodes
99 will still be allocated for basic blocks. */
100 static void
101 create_loop_tree_nodes (bool loops_p)
103 unsigned int i, j;
104 int max_regno;
105 bool skip_p;
106 edge_iterator ei;
107 edge e;
108 VEC (edge, heap) *edges;
109 loop_p loop;
111 ira_bb_nodes
112 = ((struct ira_loop_tree_node *)
113 ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
114 last_basic_block_before_change = last_basic_block;
115 for (i = 0; i < (unsigned int) last_basic_block; i++)
117 ira_bb_nodes[i].regno_allocno_map = NULL;
118 memset (ira_bb_nodes[i].reg_pressure, 0,
119 sizeof (ira_bb_nodes[i].reg_pressure));
120 ira_bb_nodes[i].all_allocnos = NULL;
121 ira_bb_nodes[i].modified_regnos = NULL;
122 ira_bb_nodes[i].border_allocnos = NULL;
123 ira_bb_nodes[i].local_copies = NULL;
125 ira_loop_nodes = ((struct ira_loop_tree_node *)
126 ira_allocate (sizeof (struct ira_loop_tree_node)
127 * VEC_length (loop_p, ira_loops.larray)));
128 max_regno = max_reg_num ();
129 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
131 if (loop != ira_loops.tree_root)
133 ira_loop_nodes[i].regno_allocno_map = NULL;
134 if (! loops_p)
135 continue;
136 skip_p = false;
137 FOR_EACH_EDGE (e, ei, loop->header->preds)
138 if (e->src != loop->latch
139 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
141 skip_p = true;
142 break;
144 if (skip_p)
145 continue;
146 edges = get_loop_exit_edges (loop);
147 FOR_EACH_VEC_ELT (edge, edges, j, e)
148 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
150 skip_p = true;
151 break;
153 VEC_free (edge, heap, edges);
154 if (skip_p)
155 continue;
157 ira_loop_nodes[i].regno_allocno_map
158 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
159 memset (ira_loop_nodes[i].regno_allocno_map, 0,
160 sizeof (ira_allocno_t) * max_regno);
161 memset (ira_loop_nodes[i].reg_pressure, 0,
162 sizeof (ira_loop_nodes[i].reg_pressure));
163 ira_loop_nodes[i].all_allocnos = ira_allocate_bitmap ();
164 ira_loop_nodes[i].modified_regnos = ira_allocate_bitmap ();
165 ira_loop_nodes[i].border_allocnos = ira_allocate_bitmap ();
166 ira_loop_nodes[i].local_copies = ira_allocate_bitmap ();
170 /* The function returns TRUE if there are more one allocation
171 region. */
172 static bool
173 more_one_region_p (void)
175 unsigned int i;
176 loop_p loop;
178 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
179 if (ira_loop_nodes[i].regno_allocno_map != NULL
180 && ira_loop_tree_root != &ira_loop_nodes[i])
181 return true;
182 return false;
185 /* Free the loop tree node of a loop. */
186 static void
187 finish_loop_tree_node (ira_loop_tree_node_t loop)
189 if (loop->regno_allocno_map != NULL)
191 ira_assert (loop->bb == NULL);
192 ira_free_bitmap (loop->local_copies);
193 ira_free_bitmap (loop->border_allocnos);
194 ira_free_bitmap (loop->modified_regnos);
195 ira_free_bitmap (loop->all_allocnos);
196 ira_free (loop->regno_allocno_map);
197 loop->regno_allocno_map = NULL;
201 /* Free the loop tree nodes. */
202 static void
203 finish_loop_tree_nodes (void)
205 unsigned int i;
206 loop_p loop;
208 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
209 finish_loop_tree_node (&ira_loop_nodes[i]);
210 ira_free (ira_loop_nodes);
211 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
213 if (ira_bb_nodes[i].local_copies != NULL)
214 ira_free_bitmap (ira_bb_nodes[i].local_copies);
215 if (ira_bb_nodes[i].border_allocnos != NULL)
216 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
217 if (ira_bb_nodes[i].modified_regnos != NULL)
218 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
219 if (ira_bb_nodes[i].all_allocnos != NULL)
220 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
221 if (ira_bb_nodes[i].regno_allocno_map != NULL)
222 ira_free (ira_bb_nodes[i].regno_allocno_map);
224 ira_free (ira_bb_nodes);
229 /* The following recursive function adds LOOP to the loop tree
230 hierarchy. LOOP is added only once. */
231 static void
232 add_loop_to_tree (struct loop *loop)
234 struct loop *parent;
235 ira_loop_tree_node_t loop_node, parent_node;
237 /* We can not use loop node access macros here because of potential
238 checking and because the nodes are not initialized enough
239 yet. */
240 if (loop_outer (loop) != NULL)
241 add_loop_to_tree (loop_outer (loop));
242 if (ira_loop_nodes[loop->num].regno_allocno_map != NULL
243 && ira_loop_nodes[loop->num].children == NULL)
245 /* We have not added loop node to the tree yet. */
246 loop_node = &ira_loop_nodes[loop->num];
247 loop_node->loop = loop;
248 loop_node->bb = NULL;
249 for (parent = loop_outer (loop);
250 parent != NULL;
251 parent = loop_outer (parent))
252 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
253 break;
254 if (parent == NULL)
256 loop_node->next = NULL;
257 loop_node->subloop_next = NULL;
258 loop_node->parent = NULL;
260 else
262 parent_node = &ira_loop_nodes[parent->num];
263 loop_node->next = parent_node->children;
264 parent_node->children = loop_node;
265 loop_node->subloop_next = parent_node->subloops;
266 parent_node->subloops = loop_node;
267 loop_node->parent = parent_node;
272 /* The following recursive function sets up levels of nodes of the
273 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
274 The function returns maximal value of level in the tree + 1. */
275 static int
276 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
278 int height, max_height;
279 ira_loop_tree_node_t subloop_node;
281 ira_assert (loop_node->bb == NULL);
282 loop_node->level = level;
283 max_height = level + 1;
284 for (subloop_node = loop_node->subloops;
285 subloop_node != NULL;
286 subloop_node = subloop_node->subloop_next)
288 ira_assert (subloop_node->bb == NULL);
289 height = setup_loop_tree_level (subloop_node, level + 1);
290 if (height > max_height)
291 max_height = height;
293 return max_height;
296 /* Create the loop tree. The algorithm is designed to provide correct
297 order of loops (they are ordered by their last loop BB) and basic
298 blocks in the chain formed by member next. */
299 static void
300 form_loop_tree (void)
302 unsigned int i;
303 basic_block bb;
304 struct loop *parent;
305 ira_loop_tree_node_t bb_node, loop_node;
306 loop_p loop;
308 /* We can not use loop/bb node access macros because of potential
309 checking and because the nodes are not initialized enough
310 yet. */
311 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
312 if (ira_loop_nodes[i].regno_allocno_map != NULL)
314 ira_loop_nodes[i].children = NULL;
315 ira_loop_nodes[i].subloops = NULL;
317 FOR_EACH_BB (bb)
319 bb_node = &ira_bb_nodes[bb->index];
320 bb_node->bb = bb;
321 bb_node->loop = NULL;
322 bb_node->subloops = NULL;
323 bb_node->children = NULL;
324 bb_node->subloop_next = NULL;
325 bb_node->next = NULL;
326 for (parent = bb->loop_father;
327 parent != NULL;
328 parent = loop_outer (parent))
329 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
330 break;
331 add_loop_to_tree (parent);
332 loop_node = &ira_loop_nodes[parent->num];
333 bb_node->next = loop_node->children;
334 bb_node->parent = loop_node;
335 loop_node->children = bb_node;
337 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (ira_loops.tree_root->num);
338 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
339 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
344 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
345 tree nodes. */
346 static void
347 rebuild_regno_allocno_maps (void)
349 unsigned int l;
350 int max_regno, regno;
351 ira_allocno_t a;
352 ira_loop_tree_node_t loop_tree_node;
353 loop_p loop;
354 ira_allocno_iterator ai;
356 max_regno = max_reg_num ();
357 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, l, loop)
358 if (ira_loop_nodes[l].regno_allocno_map != NULL)
360 ira_free (ira_loop_nodes[l].regno_allocno_map);
361 ira_loop_nodes[l].regno_allocno_map
362 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
363 * max_regno);
364 memset (ira_loop_nodes[l].regno_allocno_map, 0,
365 sizeof (ira_allocno_t) * max_regno);
367 ira_free (ira_regno_allocno_map);
368 ira_regno_allocno_map
369 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
370 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
371 FOR_EACH_ALLOCNO (a, ai)
373 if (ALLOCNO_CAP_MEMBER (a) != NULL)
374 /* Caps are not in the regno allocno maps. */
375 continue;
376 regno = ALLOCNO_REGNO (a);
377 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
378 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
379 ira_regno_allocno_map[regno] = a;
380 if (loop_tree_node->regno_allocno_map[regno] == NULL)
381 /* Remember that we can create temporary allocnos to break
382 cycles in register shuffle. */
383 loop_tree_node->regno_allocno_map[regno] = a;
388 /* Pools for allocnos, allocno live ranges and objects. */
389 static alloc_pool allocno_pool, live_range_pool, object_pool;
391 /* Vec containing references to all created allocnos. It is a
392 container of array allocnos. */
393 static VEC(ira_allocno_t,heap) *allocno_vec;
395 /* Vec containing references to all created ira_objects. It is a
396 container of ira_object_id_map. */
397 static VEC(ira_object_t,heap) *ira_object_id_map_vec;
399 /* Initialize data concerning allocnos. */
400 static void
401 initiate_allocnos (void)
403 live_range_pool
404 = create_alloc_pool ("live ranges",
405 sizeof (struct live_range), 100);
406 allocno_pool
407 = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
408 object_pool
409 = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
410 allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
411 ira_allocnos = NULL;
412 ira_allocnos_num = 0;
413 ira_objects_num = 0;
414 ira_object_id_map_vec
415 = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
416 ira_object_id_map = NULL;
417 ira_regno_allocno_map
418 = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
419 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
422 /* Create and return an object corresponding to a new allocno A. */
423 static ira_object_t
424 ira_create_object (ira_allocno_t a, int subword)
426 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
427 ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
429 OBJECT_ALLOCNO (obj) = a;
430 OBJECT_SUBWORD (obj) = subword;
431 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
432 OBJECT_CONFLICT_VEC_P (obj) = false;
433 OBJECT_CONFLICT_ARRAY (obj) = NULL;
434 OBJECT_NUM_CONFLICTS (obj) = 0;
435 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
436 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
437 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
438 reg_class_contents[cover_class]);
439 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
440 reg_class_contents[cover_class]);
441 OBJECT_MIN (obj) = INT_MAX;
442 OBJECT_MAX (obj) = -1;
443 OBJECT_LIVE_RANGES (obj) = NULL;
445 VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
446 ira_object_id_map
447 = VEC_address (ira_object_t, ira_object_id_map_vec);
448 ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);
450 return obj;
453 /* Create and return the allocno corresponding to REGNO in
454 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
455 same regno if CAP_P is FALSE. */
456 ira_allocno_t
457 ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
459 ira_allocno_t a;
461 a = (ira_allocno_t) pool_alloc (allocno_pool);
462 ALLOCNO_REGNO (a) = regno;
463 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
464 if (! cap_p)
466 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
467 ira_regno_allocno_map[regno] = a;
468 if (loop_tree_node->regno_allocno_map[regno] == NULL)
469 /* Remember that we can create temporary allocnos to break
470 cycles in register shuffle on region borders (see
471 ira-emit.c). */
472 loop_tree_node->regno_allocno_map[regno] = a;
474 ALLOCNO_CAP (a) = NULL;
475 ALLOCNO_CAP_MEMBER (a) = NULL;
476 ALLOCNO_NUM (a) = ira_allocnos_num;
477 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
478 ALLOCNO_NREFS (a) = 0;
479 ALLOCNO_FREQ (a) = 0;
480 ALLOCNO_HARD_REGNO (a) = -1;
481 ALLOCNO_CALL_FREQ (a) = 0;
482 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
483 #ifdef STACK_REGS
484 ALLOCNO_NO_STACK_REG_P (a) = false;
485 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
486 #endif
487 ALLOCNO_MEM_OPTIMIZED_DEST (a) = NULL;
488 ALLOCNO_MEM_OPTIMIZED_DEST_P (a) = false;
489 ALLOCNO_SOMEWHERE_RENAMED_P (a) = false;
490 ALLOCNO_CHILD_RENAMED_P (a) = false;
491 ALLOCNO_DONT_REASSIGN_P (a) = false;
492 ALLOCNO_BAD_SPILL_P (a) = false;
493 ALLOCNO_IN_GRAPH_P (a) = false;
494 ALLOCNO_ASSIGNED_P (a) = false;
495 ALLOCNO_MAY_BE_SPILLED_P (a) = false;
496 ALLOCNO_SPLAY_REMOVED_P (a) = false;
497 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
498 ALLOCNO_COPIES (a) = NULL;
499 ALLOCNO_HARD_REG_COSTS (a) = NULL;
500 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
501 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
502 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
503 ALLOCNO_LEFT_CONFLICTS_SIZE (a) = -1;
504 ALLOCNO_COVER_CLASS (a) = NO_REGS;
505 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = 0;
506 ALLOCNO_COVER_CLASS_COST (a) = 0;
507 ALLOCNO_MEMORY_COST (a) = 0;
508 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
509 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
510 ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = NULL;
511 ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
512 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
513 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
514 ALLOCNO_NUM_OBJECTS (a) = 0;
516 VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
517 ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
518 ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
520 return a;
523 /* Set up cover class for A and update its conflict hard registers. */
524 void
525 ira_set_allocno_cover_class (ira_allocno_t a, enum reg_class cover_class)
527 ALLOCNO_COVER_CLASS (a) = cover_class;
530 /* Determine the number of objects we should associate with allocno A
531 and allocate them. */
532 void
533 ira_create_allocno_objects (ira_allocno_t a)
535 enum machine_mode mode = ALLOCNO_MODE (a);
536 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
537 int n = ira_reg_class_nregs[cover_class][mode];
538 int i;
540 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
541 n = 1;
543 ALLOCNO_NUM_OBJECTS (a) = n;
544 for (i = 0; i < n; i++)
545 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
548 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
549 ALLOCNO_OBJECT structures. This must be called after the cover
550 classes are known. */
551 static void
552 create_allocno_objects (void)
554 ira_allocno_t a;
555 ira_allocno_iterator ai;
557 FOR_EACH_ALLOCNO (a, ai)
558 ira_create_allocno_objects (a);
561 /* Merge hard register conflict information for all objects associated with
562 allocno TO into the corresponding objects associated with FROM.
563 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
564 static void
565 merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
566 bool total_only)
568 int i;
569 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
570 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
572 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
573 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
574 if (!total_only)
575 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
576 OBJECT_CONFLICT_HARD_REGS (from_obj));
577 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
578 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
580 #ifdef STACK_REGS
581 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
582 ALLOCNO_NO_STACK_REG_P (to) = true;
583 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
584 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
585 #endif
588 /* Update hard register conflict information for all objects associated with
589 A to include the regs in SET. */
590 void
591 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
593 ira_allocno_object_iterator i;
594 ira_object_t obj;
595 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
597 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
598 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
602 /* Return TRUE if a conflict vector with NUM elements is more
603 profitable than a conflict bit vector for OBJ. */
604 bool
605 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
607 int nw;
608 int max = OBJECT_MAX (obj);
609 int min = OBJECT_MIN (obj);
611 if (max < min)
612 /* We prefer a bit vector in such case because it does not result
613 in allocation. */
614 return false;
616 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
617 return (2 * sizeof (ira_object_t) * (num + 1)
618 < 3 * nw * sizeof (IRA_INT_TYPE));
621 /* Allocates and initialize the conflict vector of OBJ for NUM
622 conflicting objects. */
623 void
624 ira_allocate_conflict_vec (ira_object_t obj, int num)
626 int size;
627 ira_object_t *vec;
629 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
630 num++; /* for NULL end marker */
631 size = sizeof (ira_object_t) * num;
632 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
633 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
634 vec[0] = NULL;
635 OBJECT_NUM_CONFLICTS (obj) = 0;
636 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
637 OBJECT_CONFLICT_VEC_P (obj) = true;
640 /* Allocate and initialize the conflict bit vector of OBJ. */
641 static void
642 allocate_conflict_bit_vec (ira_object_t obj)
644 unsigned int size;
646 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
647 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
648 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
649 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
650 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
651 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
652 OBJECT_CONFLICT_VEC_P (obj) = false;
655 /* Allocate and initialize the conflict vector or conflict bit vector
656 of OBJ for NUM conflicting allocnos whatever is more profitable. */
657 void
658 ira_allocate_object_conflicts (ira_object_t obj, int num)
660 if (ira_conflict_vector_profitable_p (obj, num))
661 ira_allocate_conflict_vec (obj, num);
662 else
663 allocate_conflict_bit_vec (obj);
666 /* Add OBJ2 to the conflicts of OBJ1. */
667 static void
668 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
670 int num;
671 unsigned int size;
673 if (OBJECT_CONFLICT_VEC_P (obj1))
675 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
676 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
677 num = curr_num + 2;
678 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
680 ira_object_t *newvec;
681 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
682 newvec = (ira_object_t *) ira_allocate (size);
683 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
684 ira_free (vec);
685 vec = newvec;
686 OBJECT_CONFLICT_ARRAY (obj1) = vec;
687 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
689 vec[num - 2] = obj2;
690 vec[num - 1] = NULL;
691 OBJECT_NUM_CONFLICTS (obj1)++;
693 else
695 int nw, added_head_nw, id;
696 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
698 id = OBJECT_CONFLICT_ID (obj2);
699 if (OBJECT_MIN (obj1) > id)
701 /* Expand head of the bit vector. */
702 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
703 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
704 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
705 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
707 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
708 vec, nw * sizeof (IRA_INT_TYPE));
709 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
711 else
713 size
714 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
715 vec = (IRA_INT_TYPE *) ira_allocate (size);
716 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
717 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
718 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
719 memset ((char *) vec
720 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
721 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
722 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
723 OBJECT_CONFLICT_ARRAY (obj1) = vec;
724 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
726 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
728 else if (OBJECT_MAX (obj1) < id)
730 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
731 size = nw * sizeof (IRA_INT_TYPE);
732 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
734 /* Expand tail of the bit vector. */
735 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
736 vec = (IRA_INT_TYPE *) ira_allocate (size);
737 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
738 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
739 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
740 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
741 OBJECT_CONFLICT_ARRAY (obj1) = vec;
742 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
744 OBJECT_MAX (obj1) = id;
746 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
750 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
751 static void
752 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
754 add_to_conflicts (obj1, obj2);
755 add_to_conflicts (obj2, obj1);
758 /* Clear all conflicts of OBJ. */
759 static void
760 clear_conflicts (ira_object_t obj)
762 if (OBJECT_CONFLICT_VEC_P (obj))
764 OBJECT_NUM_CONFLICTS (obj) = 0;
765 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
767 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
769 int nw;
771 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
772 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
784 /* Remove duplications in conflict vector of OBJ. */
785 static void
786 compress_conflict_vec (ira_object_t obj)
788 ira_object_t *vec, conflict_obj;
789 int i, j;
791 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
792 vec = OBJECT_CONFLICT_VEC (obj);
793 curr_conflict_check_tick++;
794 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
796 int id = OBJECT_CONFLICT_ID (conflict_obj);
797 if (conflict_check[id] != curr_conflict_check_tick)
799 conflict_check[id] = curr_conflict_check_tick;
800 vec[j++] = conflict_obj;
803 OBJECT_NUM_CONFLICTS (obj) = j;
804 vec[j] = NULL;
807 /* Remove duplications in conflict vectors of all allocnos. */
808 static void
809 compress_conflict_vecs (void)
811 ira_object_t obj;
812 ira_object_iterator oi;
814 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
815 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
816 curr_conflict_check_tick = 0;
817 FOR_EACH_OBJECT (obj, oi)
819 if (OBJECT_CONFLICT_VEC_P (obj))
820 compress_conflict_vec (obj);
822 ira_free (conflict_check);
825 /* This recursive function outputs allocno A and if it is a cap the
826 function outputs its members. */
827 void
828 ira_print_expanded_allocno (ira_allocno_t a)
830 basic_block bb;
832 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
833 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
834 fprintf (ira_dump_file, ",b%d", bb->index);
835 else
836 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
837 if (ALLOCNO_CAP_MEMBER (a) != NULL)
839 fprintf (ira_dump_file, ":");
840 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
842 fprintf (ira_dump_file, ")");
845 /* Create and return the cap representing allocno A in the
846 parent loop. */
847 static ira_allocno_t
848 create_cap_allocno (ira_allocno_t a)
850 ira_allocno_t cap;
851 ira_loop_tree_node_t parent;
852 enum reg_class cover_class;
854 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
855 && ALLOCNO_NEXT_COALESCED_ALLOCNO (a) == a);
856 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
857 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
858 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
859 cover_class = ALLOCNO_COVER_CLASS (a);
860 ira_set_allocno_cover_class (cap, cover_class);
861 ira_create_allocno_objects (cap);
862 ALLOCNO_AVAILABLE_REGS_NUM (cap) = ALLOCNO_AVAILABLE_REGS_NUM (a);
863 ALLOCNO_CAP_MEMBER (cap) = a;
864 ALLOCNO_CAP (a) = cap;
865 ALLOCNO_COVER_CLASS_COST (cap) = ALLOCNO_COVER_CLASS_COST (a);
866 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
867 ira_allocate_and_copy_costs
868 (&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
869 ira_allocate_and_copy_costs
870 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
871 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
872 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
873 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
874 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
875 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
877 merge_hard_reg_conflicts (a, cap, false);
879 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
880 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
882 fprintf (ira_dump_file, " Creating cap ");
883 ira_print_expanded_allocno (cap);
884 fprintf (ira_dump_file, "\n");
886 return cap;
889 /* Create and return a live range for OBJECT with given attributes. */
890 live_range_t
891 ira_create_live_range (ira_object_t obj, int start, int finish,
892 live_range_t next)
894 live_range_t p;
896 p = (live_range_t) pool_alloc (live_range_pool);
897 p->object = obj;
898 p->start = start;
899 p->finish = finish;
900 p->next = next;
901 return p;
904 /* Create a new live range for OBJECT and queue it at the head of its
905 live range list. */
906 void
907 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
909 live_range_t p;
910 p = ira_create_live_range (object, start, finish,
911 OBJECT_LIVE_RANGES (object));
912 OBJECT_LIVE_RANGES (object) = p;
915 /* Copy allocno live range R and return the result. */
916 static live_range_t
917 copy_live_range (live_range_t r)
919 live_range_t p;
921 p = (live_range_t) pool_alloc (live_range_pool);
922 *p = *r;
923 return p;
926 /* Copy allocno live range list given by its head R and return the
927 result. */
928 live_range_t
929 ira_copy_live_range_list (live_range_t r)
931 live_range_t p, first, last;
933 if (r == NULL)
934 return NULL;
935 for (first = last = NULL; r != NULL; r = r->next)
937 p = copy_live_range (r);
938 if (first == NULL)
939 first = p;
940 else
941 last->next = p;
942 last = p;
944 return first;
947 /* Merge ranges R1 and R2 and returns the result. The function
948 maintains the order of ranges and tries to minimize number of the
949 result ranges. */
950 live_range_t
951 ira_merge_live_ranges (live_range_t r1, live_range_t r2)
953 live_range_t first, last, temp;
955 if (r1 == NULL)
956 return r2;
957 if (r2 == NULL)
958 return r1;
959 for (first = last = NULL; r1 != NULL && r2 != NULL;)
961 if (r1->start < r2->start)
963 temp = r1;
964 r1 = r2;
965 r2 = temp;
967 if (r1->start <= r2->finish + 1)
969 /* Intersected ranges: merge r1 and r2 into r1. */
970 r1->start = r2->start;
971 if (r1->finish < r2->finish)
972 r1->finish = r2->finish;
973 temp = r2;
974 r2 = r2->next;
975 ira_finish_live_range (temp);
976 if (r2 == NULL)
978 /* To try to merge with subsequent ranges in r1. */
979 r2 = r1->next;
980 r1->next = NULL;
983 else
985 /* Add r1 to the result. */
986 if (first == NULL)
987 first = last = r1;
988 else
990 last->next = r1;
991 last = r1;
993 r1 = r1->next;
994 if (r1 == NULL)
996 /* To try to merge with subsequent ranges in r2. */
997 r1 = r2->next;
998 r2->next = NULL;
1002 if (r1 != NULL)
1004 if (first == NULL)
1005 first = r1;
1006 else
1007 last->next = r1;
1008 ira_assert (r1->next == NULL);
1010 else if (r2 != NULL)
1012 if (first == NULL)
1013 first = r2;
1014 else
1015 last->next = r2;
1016 ira_assert (r2->next == NULL);
1018 else
1020 ira_assert (last->next == NULL);
1022 return first;
1025 /* Return TRUE if live ranges R1 and R2 intersect. */
1026 bool
1027 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1029 /* Remember the live ranges are always kept ordered. */
1030 while (r1 != NULL && r2 != NULL)
1032 if (r1->start > r2->finish)
1033 r1 = r1->next;
1034 else if (r2->start > r1->finish)
1035 r2 = r2->next;
1036 else
1037 return true;
1039 return false;
1042 /* Free allocno live range R. */
1043 void
1044 ira_finish_live_range (live_range_t r)
1046 pool_free (live_range_pool, r);
1049 /* Free list of allocno live ranges starting with R. */
1050 void
1051 ira_finish_live_range_list (live_range_t r)
1053 live_range_t next_r;
1055 for (; r != NULL; r = next_r)
1057 next_r = r->next;
1058 ira_finish_live_range (r);
1062 /* Free updated register costs of allocno A. */
1063 void
1064 ira_free_allocno_updated_costs (ira_allocno_t a)
1066 enum reg_class cover_class;
1068 cover_class = ALLOCNO_COVER_CLASS (a);
1069 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1070 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
1071 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1072 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1073 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1074 cover_class);
1075 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1078 /* Free the memory allocated for allocno A. */
1079 static void
1080 finish_allocno (ira_allocno_t a)
1082 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
1083 ira_object_t obj;
1084 ira_allocno_object_iterator oi;
1086 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1088 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1089 ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1090 if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1091 ira_free (OBJECT_CONFLICT_ARRAY (obj));
1092 pool_free (object_pool, obj);
1095 ira_allocnos[ALLOCNO_NUM (a)] = NULL;
1096 if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1097 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
1098 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1099 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
1100 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1101 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
1102 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1103 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1104 cover_class);
1105 pool_free (allocno_pool, a);
1108 /* Free the memory allocated for all allocnos. */
1109 static void
1110 finish_allocnos (void)
1112 ira_allocno_t a;
1113 ira_allocno_iterator ai;
1115 FOR_EACH_ALLOCNO (a, ai)
1116 finish_allocno (a);
1117 ira_free (ira_regno_allocno_map);
1118 VEC_free (ira_object_t, heap, ira_object_id_map_vec);
1119 VEC_free (ira_allocno_t, heap, allocno_vec);
1120 free_alloc_pool (allocno_pool);
1121 free_alloc_pool (object_pool);
1122 free_alloc_pool (live_range_pool);
1127 /* Pools for copies. */
1128 static alloc_pool copy_pool;
1130 /* Vec containing references to all created copies. It is a
1131 container of array ira_copies. */
1132 static VEC(ira_copy_t,heap) *copy_vec;
1134 /* The function initializes data concerning allocno copies. */
1135 static void
1136 initiate_copies (void)
1138 copy_pool
1139 = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
1140 copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
1141 ira_copies = NULL;
1142 ira_copies_num = 0;
1145 /* Return copy connecting A1 and A2 and originated from INSN of
1146 LOOP_TREE_NODE if any. */
1147 static ira_copy_t
1148 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
1149 ira_loop_tree_node_t loop_tree_node)
1151 ira_copy_t cp, next_cp;
1152 ira_allocno_t another_a;
1154 for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1156 if (cp->first == a1)
1158 next_cp = cp->next_first_allocno_copy;
1159 another_a = cp->second;
1161 else if (cp->second == a1)
1163 next_cp = cp->next_second_allocno_copy;
1164 another_a = cp->first;
1166 else
1167 gcc_unreachable ();
1168 if (another_a == a2 && cp->insn == insn
1169 && cp->loop_tree_node == loop_tree_node)
1170 return cp;
1172 return NULL;
1175 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1176 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1177 ira_copy_t
1178 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1179 bool constraint_p, rtx insn,
1180 ira_loop_tree_node_t loop_tree_node)
1182 ira_copy_t cp;
1184 cp = (ira_copy_t) pool_alloc (copy_pool);
1185 cp->num = ira_copies_num;
1186 cp->first = first;
1187 cp->second = second;
1188 cp->freq = freq;
1189 cp->constraint_p = constraint_p;
1190 cp->insn = insn;
1191 cp->loop_tree_node = loop_tree_node;
1192 VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
1193 ira_copies = VEC_address (ira_copy_t, copy_vec);
1194 ira_copies_num = VEC_length (ira_copy_t, copy_vec);
1195 return cp;
/* Attach a copy CP to allocnos involved into the copy.  Each copy
   lives on two intrusive doubly-linked lists at once: the copy list
   of CP->first (next/prev_first_allocno_copy links) and the copy list
   of CP->second (next/prev_second_allocno_copy links).  CP becomes
   the new head of both lists.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  /* CP will head both lists, so it has no predecessor on either.  */
  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      /* Link the old head back to CP through whichever prev pointer
	 corresponds to FIRST's role (first or second) in that copy.  */
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      /* Same back-link fixup for SECOND's list.  */
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1226 /* Make a copy CP a canonical copy where number of the
1227 first allocno is less than the second one. */
1228 void
1229 ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1231 ira_allocno_t temp;
1232 ira_copy_t temp_cp;
1234 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1235 return;
1237 temp = cp->first;
1238 cp->first = cp->second;
1239 cp->second = temp;
1241 temp_cp = cp->prev_first_allocno_copy;
1242 cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
1243 cp->prev_second_allocno_copy = temp_cp;
1245 temp_cp = cp->next_first_allocno_copy;
1246 cp->next_first_allocno_copy = cp->next_second_allocno_copy;
1247 cp->next_second_allocno_copy = temp_cp;
1250 /* Create (or update frequency if the copy already exists) and return
1251 the copy of allocnos FIRST and SECOND with frequency FREQ
1252 corresponding to move insn INSN (if any) and originated from
1253 LOOP_TREE_NODE. */
1254 ira_copy_t
1255 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1256 bool constraint_p, rtx insn,
1257 ira_loop_tree_node_t loop_tree_node)
1259 ira_copy_t cp;
1261 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1263 cp->freq += freq;
1264 return cp;
1266 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1267 loop_tree_node);
1268 ira_assert (first != NULL && second != NULL);
1269 ira_add_allocno_copy_to_list (cp);
1270 ira_swap_allocno_copy_ends_if_necessary (cp);
1271 return cp;
1274 /* Print info about copy CP into file F. */
1275 static void
1276 print_copy (FILE *f, ira_copy_t cp)
1278 fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1279 ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1280 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1281 cp->insn != NULL
1282 ? "move" : cp->constraint_p ? "constraint" : "shuffle");
/* Print info about copy CP into stderr.  Debugger convenience
   wrapper around print_copy.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies into file F, one line per copy.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
/* Print info about all copies into stderr.  Debugger convenience
   wrapper around print_copies.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F.  Walks
   A's intrusive copy list; A may be either end of each copy, which
   determines both the "next" link to follow and the opposite-end
   allocno to report.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
/* Print info about copies involving allocno A into stderr.
   Debugger convenience wrapper around print_allocno_copies.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* Free the memory allocated for copy CP by returning it to
   COPY_POOL.  CP must already be unlinked from any allocno lists.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
1353 /* Free memory allocated for all copies. */
1354 static void
1355 finish_copies (void)
1357 ira_copy_t cp;
1358 ira_copy_iterator ci;
1360 FOR_EACH_COPY (cp, ci)
1361 finish_copy (cp);
1362 VEC_free (ira_copy_t, heap, copy_vec);
1363 free_alloc_pool (copy_pool);
1368 /* Pools for cost vectors. It is defined only for cover classes. */
1369 static alloc_pool cost_vector_pool[N_REG_CLASSES];
1371 /* The function initiates work with hard register cost vectors. It
1372 creates allocation pool for each cover class. */
1373 static void
1374 initiate_cost_vectors (void)
1376 int i;
1377 enum reg_class cover_class;
1379 for (i = 0; i < ira_reg_class_cover_size; i++)
1381 cover_class = ira_reg_class_cover[i];
1382 cost_vector_pool[cover_class]
1383 = create_alloc_pool ("cost vectors",
1384 sizeof (int)
1385 * ira_class_hard_regs_num[cover_class],
1386 100);
/* Allocate and return a cost vector for COVER_CLASS.  The vector
   comes uninitialized from the per-class pool and holds one int per
   hard register of the class.  */
int *
ira_allocate_cost_vector (enum reg_class cover_class)
{
  return (int *) pool_alloc (cost_vector_pool[cover_class]);
}
/* Free cost vector VEC previously allocated for COVER_CLASS.  VEC
   must be non-NULL and must have come from the same class's pool.  */
void
ira_free_cost_vector (int *vec, enum reg_class cover_class)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[cover_class], vec);
}
1405 /* Finish work with hard register cost vectors. Release allocation
1406 pool for each cover class. */
1407 static void
1408 finish_cost_vectors (void)
1410 int i;
1411 enum reg_class cover_class;
1413 for (i = 0; i < ira_reg_class_cover_size; i++)
1415 cover_class = ira_reg_class_cover[i];
1416 free_alloc_pool (cost_vector_pool[cover_class]);
1422 /* The current loop tree node and its regno allocno map. */
1423 ira_loop_tree_node_t ira_curr_loop_tree_node;
1424 ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).

   NOTE: both callbacks may rely on the two globals above; they are
   re-established after each recursive call because the recursion
   clobbers them.  LOOP_NODE must be a loop node, not a BB node.  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    /* BB children get both callbacks immediately -- a BB node has no
       subtree of its own.  */
    for (subloop_node = loop_node->children;
	 subloop_node != NULL;
	 subloop_node = subloop_node->next)
      if (subloop_node->bb != NULL)
	{
	  if (preorder_func != NULL)
	    (*preorder_func) (subloop_node);

	  if (postorder_func != NULL)
	    (*postorder_func) (subloop_node);
	}

  /* Recurse into proper subloops after the BBs.  */
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* Restore the globals clobbered by the recursion before the
     postorder callback for this node runs.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1477 /* The basic block currently being processed. */
1478 static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to the
   pseudo-registers contained in X.  True OUTPUT_P means that X is an
   lvalue (a written location).  Reads IRA_CURR_REGNO_ALLOCNO_MAP,
   IRA_CURR_LOOP_TREE_NODE and CURR_BB, which the caller must have
   set up.  */
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      /* Only pseudos get allocnos; hard registers are ignored.  */
      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

	  /* Account this reference and its execution frequency.  */
	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      /* An address inside a MEM is always a read, even when the MEM
	 itself is being stored to.  */
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  /* Generic walk over the remaining RTX codes, propagating OUTPUT_P
     unchanged.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  Insns are scanned in reverse order; in addition,
   allocnos are created for pseudos that are live into the block but
   not referenced in it (e.g. living through from one subloop to
   another).  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo counts as live on the edge when it is live
   out of E->src and live into E->dest.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = DF_LR_IN (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map: the parent's allocno must exist
	       before the child's so the regno-allocno chain is built
	       outer-region-first.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in the
   loop represented by the corresponding loop tree node LOOP_NODE.
   This function is called by ira_traverse_loop_tree.  BB nodes are
   handled by create_bb_allocnos; for real loops (other than the
   root) allocnos are created on the entry edges (excluding the latch
   back-edge) and on all exit edges.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      VEC (edge, heap) *edges;

      /* Entry edges: every header predecessor except the latch.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      /* Exit edges.  */
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
	create_loop_allocnos (e);
      VEC_free (edge, heap, edges);
    }
}
1623 /* Propagate information about allocnos modified inside the loop given
1624 by its LOOP_TREE_NODE to its parent. */
1625 static void
1626 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1628 if (loop_tree_node == ira_loop_tree_root)
1629 return;
1630 ira_assert (loop_tree_node->bb == NULL);
1631 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1632 loop_tree_node->modified_regnos);
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class cover_class;

  /* Only meaningful for regional allocation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* A parent is a bad spill candidate as soon as any child
	     is.  */
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  /* Additive accumulation of reference, frequency, call and
	     pressure statistics.  */
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  cover_class = ALLOCNO_COVER_CLASS (a);
	  ira_assert (cover_class == ALLOCNO_COVER_CLASS (parent_a));
	  /* Cost vectors are allocated lazily in the parent when
	     first accumulated into.  */
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), cover_class,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     cover_class,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_COVER_CLASS_COST (parent_a)
	    += ALLOCNO_COVER_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  /* The modified-regnos propagation is only needed when optimizing
     (regional allocation).  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
1704 /* The page contains function to remove some regions from a separate
1705 register allocation. We remove regions whose separate allocation
1706 will hardly improve the result. As a result we speed up regional
1707 register allocation. */
1709 /* The function changes the object in range list given by R to OBJ. */
1710 static void
1711 change_object_in_range_list (live_range_t r, ira_object_t obj)
1713 for (; r != NULL; r = r->next)
1714 r->object = obj;
/* Move all live ranges associated with allocno FROM to allocno TO.
   The range nodes themselves are reused (not copied); FROM's objects
   are left with empty range lists.  Both allocnos must have the same
   number of objects.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   "      Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* The moved ranges must point at their new owner object before
	 the merge.  */
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM into allocno TO.
   Unlike move_allocno_live_ranges, FROM keeps its own ranges; TO
   receives freshly allocated duplicates merged into its lists.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* Duplicate the list, then retarget the duplicates to TO's
	 object and merge them in.  */
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
1775 /* Return TRUE if NODE represents a loop with low register
1776 pressure. */
1777 static bool
1778 low_pressure_loop_node_p (ira_loop_tree_node_t node)
1780 int i;
1781 enum reg_class cover_class;
1783 if (node->bb != NULL)
1784 return false;
1786 for (i = 0; i < ira_reg_class_cover_size; i++)
1788 cover_class = ira_reg_class_cover[i];
1789 if (node->reg_pressure[cover_class]
1790 > ira_available_class_regs[cover_class])
1791 return false;
1793 return true;
1796 /* Sort loops for marking them for removal. We put already marked
1797 loops first, then less frequent loops next, and then outer loops
1798 next. */
1799 static int
1800 loop_compare_func (const void *v1p, const void *v2p)
1802 int diff;
1803 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
1804 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
1806 ira_assert (l1->parent != NULL && l2->parent != NULL);
1807 if (l1->to_remove_p && ! l2->to_remove_p)
1808 return -1;
1809 if (! l1->to_remove_p && l2->to_remove_p)
1810 return 1;
1811 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
1812 return diff;
1813 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
1814 return diff;
1815 /* Make sorting stable. */
1816 return l1->loop->num - l2->loop->num;
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * VEC_length (loop_p,
							   ira_loops.larray));
  /* Collect all non-root loop nodes, pre-marking the low-pressure-
     inside-low-pressure ones.  */
  for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = (low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	     && low_pressure_loop_node_p (&ira_loop_nodes[i]));
      }
  /* Put removal candidates and cheap loops at the front, then trim
     the region count down to IRA_MAX_LOOPS_NUM (N loops here plus
     the root, hence "n - i + 1").  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop->num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
/* Mark all loops but the root for removal, i.e. force one-region
   (non-regional) allocation.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop->num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
1898 /* Definition of vector of loop tree nodes. */
1899 DEF_VEC_P(ira_loop_tree_node_t);
1900 DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);
1902 /* Vec containing references to all removed loop tree nodes. */
1903 static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;
1905 /* Vec containing references to all children of loop tree nodes. */
1906 static VEC(ira_loop_tree_node_t,heap) *children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result (i.e. they are marked to_remove_p).  Removed
   nodes are pushed onto REMOVED_LOOP_VEC; surviving descendants are
   re-attached to the nearest surviving ancestor via CHILDREN_VEC.
   (Note: function name keeps its historical misspelling of
   "unnecessary" -- renaming would touch callers.)  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
  /* Children pushed past START belong to NODE (or to its nearest
     surviving ancestor if NODE itself is removed).  */
  start = VEC_length (ira_loop_tree_node_t, children_vec);
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* Leave NODE's surviving children on the stack for the
	 ancestor to adopt.  */
      VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
      return;
    }
  /* Rebuild NODE's children and subloops lists from the collected
     survivors.  */
  while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
    {
      subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
1946 /* Return TRUE if NODE is inside PARENT. */
1947 static bool
1948 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
1950 for (node = node->parent; node != NULL; node = node->parent)
1951 if (node == parent)
1952 return true;
1953 return false;
1956 /* Sort allocnos according to their order in regno allocno list. */
1957 static int
1958 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
1960 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
1961 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
1962 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
1963 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
1965 if (loop_is_inside_p (n1, n2))
1966 return -1;
1967 else if (loop_is_inside_p (n2, n1))
1968 return 1;
1969 /* If allocnos are equally good, sort by allocno numbers, so that
1970 the results of qsort leave nothing to chance. We put allocnos
1971 with higher number first in the list because it is the original
1972 order for allocnos from loops on the same levels. */
1973 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
1976 /* This array is used to sort allocnos to restore allocno order in
1977 the regno allocno list. */
1978 static ira_allocno_t *regno_allocnos;
/* Restore allocno order for REGNO in the regno allocno list by
   sorting the chain with regno_allocno_order_compare_func and
   relinking it.  Uses the preallocated REGNO_ALLOCNOS scratch array,
   which the caller must have sized to hold all allocnos.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  /* Unload the chain into the scratch array.  */
  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  /* Relink in sorted order.  */
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
/* Propagate accumulated info from allocno FROM_A into allocno A:
   conflicts, reference/frequency/call statistics, spill badness, and
   cost vectors.  A and FROM_A must have the same cover class.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class cover_class;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  /* A becomes a bad spill candidate only if FROM_A is not one.  */
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  cover_class = ALLOCNO_COVER_CLASS (from_a);
  ira_assert (cover_class == ALLOCNO_COVER_CLASS (a));
  /* Cost vectors are allocated lazily in A on first accumulation.  */
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     cover_class,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_COVER_CLASS_COST (a) += ALLOCNO_COVER_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  For each such allocno either move it up to the
   nearest surviving region (when that region has no allocno for the
   same regno yet) or merge it into the existing allocno there and
   free it.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;	/* Scratch array, allocated lazily.  */
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest surviving ancestor region that
		 either keeps its allocno for REGNO or is itself not
		 removed.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  /* The move may break the regno-allocno list order;
		     remember to re-sort it.  */
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2104 /* Remove allocnos from all loops but the root. */
2105 static void
2106 remove_low_level_allocnos (void)
2108 int regno;
2109 bool merged_p, propagate_p;
2110 ira_allocno_t a, top_a;
2111 ira_loop_tree_node_t a_node, parent;
2112 ira_allocno_iterator ai;
2114 merged_p = false;
/* Pass 1: fold every non-root, non-cap allocno into the root-region
   allocno of the same regno, creating the root entry from the first
   such allocno seen when none exists yet.  */
2115 FOR_EACH_ALLOCNO (a, ai)
2117 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2118 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2119 continue;
2120 regno = ALLOCNO_REGNO (a);
2121 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2123 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2124 ira_loop_tree_root->regno_allocno_map[regno] = a;
2125 continue;
/* Propagate the per-allocno info only from allocnos whose parent
   region has no allocno of this regno, so each chain contributes
   once.  */
2127 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2128 /* Remove the allocno and update info of allocno in the upper
2129 region. */
2130 move_allocno_live_ranges (a, top_a);
2131 merged_p = true;
2132 if (propagate_p)
2133 propagate_some_info_from_allocno (top_a, a);
/* Pass 2: consistency checks on the surviving non-root allocnos.  */
2135 FOR_EACH_ALLOCNO (a, ai)
2137 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2138 if (a_node == ira_loop_tree_root)
2139 continue;
2140 parent = a_node->parent;
2141 regno = ALLOCNO_REGNO (a);
2142 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2143 ira_assert (ALLOCNO_CAP (a) != NULL);
2144 else if (ALLOCNO_CAP (a) == NULL)
2145 ira_assert (parent->regno_allocno_map[regno] != NULL);
/* Pass 3: keep only the root-region allocno of each regno (refreshing
   its conflict hard-reg info from the accumulated sets) and destroy
   everything else.  */
2147 FOR_EACH_ALLOCNO (a, ai)
2149 regno = ALLOCNO_REGNO (a);
2150 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2152 ira_object_t obj;
2153 ira_allocno_object_iterator oi;
2155 ira_regno_allocno_map[regno] = a;
2156 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2157 ALLOCNO_CAP_MEMBER (a) = NULL;
2158 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2159 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2160 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2161 #ifdef STACK_REGS
2162 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2163 ALLOCNO_NO_STACK_REG_P (a) = true;
2164 #endif
2166 else
2167 finish_allocno (a);
/* Merged live ranges invalidate the range start/finish chains.  */
2169 if (merged_p)
2170 ira_rebuild_start_finish_chains ();
2173 /* Remove loops from consideration. We remove all loops except for
2174 root if ALL_P or loops for which a separate allocation will not
2175 improve the result. We have to do this after allocno creation and
2176 their costs and cover class evaluation because only after that the
2177 register pressure can be known and is calculated. */
2178 static void
2179 remove_unnecessary_regions (bool all_p)
2181 if (all_p)
2182 mark_all_loops_for_removal ();
2183 else
2184 mark_loops_for_removal ();
2185 children_vec
2186 = VEC_alloc (ira_loop_tree_node_t, heap,
2187 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2188 removed_loop_vec
2189 = VEC_alloc (ira_loop_tree_node_t, heap,
2190 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2191 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
2192 VEC_free (ira_loop_tree_node_t, heap, children_vec);
2193 if (all_p)
2194 remove_low_level_allocnos ();
2195 else
2196 remove_unnecessary_allocnos ();
2197 while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
2198 finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
2199 VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
2204 /* At this point true value of allocno attribute bad_spill_p means
2205 that there is an insn where allocno occurs and where the allocno
2206 can not be used as memory. The function updates the attribute, now
2207 it can be true only for allocnos which can not be used as memory in
2208 an insn and in whose live ranges there is other allocno deaths.
2209 Spilling allocnos with true value will not improve the code because
2210 it will not make other allocnos colorable and additional reloads
2211 for the corresponding pseudo will be generated in reload pass for
2212 each insn it occurs.
2214 This is a trick mentioned in one classic article of Chaitin etc
2215 which is frequently omitted in other implementations of RA based on
2216 graph coloring. */
2217 static void
2218 update_bad_spill_attribute (void)
2220 int i;
2221 ira_allocno_t a;
2222 ira_allocno_iterator ai;
2223 ira_allocno_object_iterator aoi;
2224 ira_object_t obj;
2225 live_range_t r;
2226 enum reg_class cover_class;
2227 bitmap_head dead_points[N_REG_CLASSES];
/* Collect, for every cover class, the set of program points at which
   some object of that class finishes a live range.  */
2229 for (i = 0; i < ira_reg_class_cover_size; i++)
2231 cover_class = ira_reg_class_cover[i];
2232 bitmap_initialize (&dead_points[cover_class], &reg_obstack);
2234 FOR_EACH_ALLOCNO (a, ai)
2236 cover_class = ALLOCNO_COVER_CLASS (a);
2237 if (cover_class == NO_REGS)
2238 continue;
2239 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2240 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2241 bitmap_set_bit (&dead_points[cover_class], r->finish);
/* Re-examine each allocno still flagged bad_spill_p.  */
2243 FOR_EACH_ALLOCNO (a, ai)
2245 cover_class = ALLOCNO_COVER_CLASS (a);
2246 if (cover_class == NO_REGS)
2247 continue;
2248 if (! ALLOCNO_BAD_SPILL_P (a))
2249 continue;
2250 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
/* Scan the interior points of each range for a recorded death of a
   same-class object; the inner break leaves i < r->finish, and the
   outer break then leaves r non-NULL.  */
2252 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2254 for (i = r->start + 1; i < r->finish; i++)
2255 if (bitmap_bit_p (&dead_points[cover_class], i))
2256 break;
2257 if (i < r->finish)
2258 break;
/* r != NULL here means such an interior death point was found, so
   clear the attribute and stop looking at this allocno's objects.  */
2260 if (r != NULL)
2262 ALLOCNO_BAD_SPILL_P (a) = false;
2263 break;
2267 for (i = 0; i < ira_reg_class_cover_size; i++)
2269 cover_class = ira_reg_class_cover[i];
2270 bitmap_clear (&dead_points[cover_class]);
2276 /* Set up minimal and maximal live range points for allocnos. */
2277 static void
2278 setup_min_max_allocno_live_range_point (void)
2280 int i;
2281 ira_allocno_t a, parent_a, cap;
2282 ira_allocno_iterator ai;
2283 #ifdef ENABLE_IRA_CHECKING
2284 ira_object_iterator oi;
2285 ira_object_t obj;
2286 #endif
2287 live_range_t r;
2288 ira_loop_tree_node_t parent;
/* Initialize each object's bounds from its own range list: the head
   range's finish becomes the max, and the start of the last range in
   the list becomes the min.  */
2290 FOR_EACH_ALLOCNO (a, ai)
2292 int n = ALLOCNO_NUM_OBJECTS (a);
2293 for (i = 0; i < n; i++)
2295 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2296 r = OBJECT_LIVE_RANGES (obj);
2297 if (r == NULL)
2298 continue;
2299 OBJECT_MAX (obj) = r->finish;
2300 for (; r->next != NULL; r = r->next)
2302 OBJECT_MIN (obj) = r->start;
/* Widen the bounds of each allocno's caps (or of the corresponding
   allocno in the parent region) to cover this allocno's bounds,
   walking each regno's allocno list.  */
2305 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2306 for (a = ira_regno_allocno_map[i];
2307 a != NULL;
2308 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2310 int j;
2311 int n = ALLOCNO_NUM_OBJECTS (a);
2312 for (j = 0; j < n; j++)
2314 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2315 ira_object_t parent_obj;
/* A negative max means the object got no live ranges above.  */
2317 if (OBJECT_MAX (obj) < 0)
2318 continue;
2319 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2320 /* Accumulation of range info. */
2321 if (ALLOCNO_CAP (a) != NULL)
2323 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2325 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2326 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2327 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2328 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2329 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2331 continue;
2333 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2334 continue;
2335 parent_a = parent->regno_allocno_map[i];
2336 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2337 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2338 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2339 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2340 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2343 #ifdef ENABLE_IRA_CHECKING
/* Verify that every object ended up with min/max inside
   [0, ira_max_point].  */
2344 FOR_EACH_OBJECT (obj, oi)
2346 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2347 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
2348 continue;
2349 gcc_unreachable ();
2351 #endif
2354 /* Sort allocnos according to their live ranges. Allocnos with
2355 smaller cover class are put first unless we use priority coloring.
2356 Allocnos with the same cover class are ordered according their start
2357 (min). Allocnos with the same start are ordered according their
2358 finish (max). */
2359 static int
2360 object_range_compare_func (const void *v1p, const void *v2p)
2362 int diff;
2363 ira_object_t obj1 = *(const ira_object_t *) v1p;
2364 ira_object_t obj2 = *(const ira_object_t *) v2p;
2365 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2366 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2368 if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2369 && (diff = ALLOCNO_COVER_CLASS (a1) - ALLOCNO_COVER_CLASS (a2)) != 0)
2370 return diff;
2371 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2372 return diff;
2373 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2374 return diff;
2375 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2378 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2379 static void
2380 sort_conflict_id_map (void)
2382 int i, num;
2383 ira_allocno_t a;
2384 ira_allocno_iterator ai;
2386 num = 0;
2387 FOR_EACH_ALLOCNO (a, ai)
2389 ira_allocno_object_iterator oi;
2390 ira_object_t obj;
2392 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2393 ira_object_id_map[num++] = obj;
2395 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2396 object_range_compare_func);
2397 for (i = 0; i < num; i++)
2399 ira_object_t obj = ira_object_id_map[i];
2400 gcc_assert (obj != NULL);
2401 OBJECT_CONFLICT_ID (obj) = i;
2403 for (i = num; i < ira_objects_num; i++)
2404 ira_object_id_map[i] = NULL;
2407 /* Set up minimal and maximal conflict ids of allocnos with which
2408 given allocno can conflict. */
2409 static void
2410 setup_min_max_conflict_allocno_ids (void)
2412 int cover_class;
2413 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2414 int *live_range_min, *last_lived;
2415 int word0_min, word0_max;
2416 ira_allocno_t a;
2417 ira_allocno_iterator ai;
/* Forward pass over objects in conflict-id order: compute, for each
   object, the smallest conflict id it can possibly conflict with,
   stashing the original range min in LIVE_RANGE_MIN and reusing
   OBJECT_MIN for the id bound.  */
2419 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2420 cover_class = -1;
2421 first_not_finished = -1;
2422 for (i = 0; i < ira_objects_num; i++)
2424 ira_object_t obj = ira_object_id_map[i];
2425 if (obj == NULL)
2426 continue;
2428 a = OBJECT_ALLOCNO (obj);
/* Reset the scan state when we cross into the next cover class
   (the map is sorted by cover class unless priority coloring).  */
2430 if (cover_class < 0
2431 || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2432 && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
2434 cover_class = ALLOCNO_COVER_CLASS (a);
2435 min = i;
2436 first_not_finished = i;
2438 else
2440 start = OBJECT_MIN (obj);
2441 /* If we skip an allocno, the allocno with smaller ids will
2442 be also skipped because of the secondary sorting the
2443 range finishes (see function
2444 object_range_compare_func). */
2445 while (first_not_finished < i
2446 && start > OBJECT_MAX (ira_object_id_map
2447 [first_not_finished]))
2448 first_not_finished++;
2449 min = first_not_finished;
2451 if (min == i)
2452 /* We could increase min further in this case but it is good
2453 enough. */
2454 min++;
2455 live_range_min[i] = OBJECT_MIN (obj);
2456 OBJECT_MIN (obj) = min;
/* Backward pass: compute the largest conflict id each object can
   conflict with.  LAST_LIVED[p] records the most recently seen
   (i.e. highest-id) object whose filled area covers point p.  */
2458 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2459 cover_class = -1;
2460 filled_area_start = -1;
2461 for (i = ira_objects_num - 1; i >= 0; i--)
2463 ira_object_t obj = ira_object_id_map[i];
2464 if (obj == NULL)
2465 continue;
2467 a = OBJECT_ALLOCNO (obj);
2468 if (cover_class < 0
2469 || (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
2470 && cover_class != (int) ALLOCNO_COVER_CLASS (a)))
2472 cover_class = ALLOCNO_COVER_CLASS (a);
2473 for (j = 0; j < ira_max_point; j++)
2474 last_lived[j] = -1;
2475 filled_area_start = ira_max_point;
2477 min = live_range_min[i];
2478 finish = OBJECT_MAX (obj);
2479 max = last_lived[finish];
2480 if (max < 0)
2481 /* We could decrease max further in this case but it is good
2482 enough. */
2483 max = OBJECT_CONFLICT_ID (obj) - 1;
2484 OBJECT_MAX (obj) = max;
2485 /* In filling, we can go further A range finish to recognize
2486 intersection quickly because if the finish of subsequently
2487 processed allocno (it has smaller conflict id) range is
2488 further A range finish than they are definitely intersected
2489 (the reason for this is the allocnos with bigger conflict id
2490 have their range starts not smaller than allocnos with
2491 smaller ids. */
2492 for (j = min; j < filled_area_start; j++)
2493 last_lived[j] = i;
2494 filled_area_start = min;
2496 ira_free (last_lived);
2497 ira_free (live_range_min);
2499 /* For allocnos with more than one object, we may later record extra conflicts in
2500 subobject 0 that we cannot really know about here.
2501 For now, simply widen the min/max range of these subobjects. */
2503 word0_min = INT_MAX;
2504 word0_max = INT_MIN;
/* First find the union of conflict ids of all word-0 subobjects...  */
2506 FOR_EACH_ALLOCNO (a, ai)
2508 int n = ALLOCNO_NUM_OBJECTS (a);
2509 ira_object_t obj0;
2510 if (n < 2)
2511 continue;
2512 obj0 = ALLOCNO_OBJECT (a, 0);
2513 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2514 word0_min = OBJECT_CONFLICT_ID (obj0);
2515 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2516 word0_max = OBJECT_CONFLICT_ID (obj0);
/* ...then widen each such subobject's [min, max] to that union.  */
2518 FOR_EACH_ALLOCNO (a, ai)
2520 int n = ALLOCNO_NUM_OBJECTS (a);
2521 ira_object_t obj0;
2522 if (n < 2)
2523 continue;
2524 obj0 = ALLOCNO_OBJECT (a, 0);
2525 if (OBJECT_MIN (obj0) > word0_min)
2526 OBJECT_MIN (obj0) = word0_min;
2527 if (OBJECT_MAX (obj0) < word0_max)
2528 OBJECT_MAX (obj0) = word0_max;
2534 static void
2535 create_caps (void)
2537 ira_allocno_t a;
2538 ira_allocno_iterator ai;
2539 ira_loop_tree_node_t loop_tree_node;
2541 FOR_EACH_ALLOCNO (a, ai)
2543 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2544 continue;
2545 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2546 create_cap_allocno (a);
2547 else if (ALLOCNO_CAP (a) == NULL)
2549 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2550 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2551 create_cap_allocno (a);
2558 /* This page contains code transforming the internal representation
2559 (IR) with more than one region into the one-region IR necessary
2560 for reload. This transformation is called IR flattening. We might
2561 just rebuild the IR for one region, but we don't do it because it
2562 takes a lot of time. */
2564 /* Map: regno -> allocnos which will finally represent the regno for
2565 IR with one region. */
2566 static ira_allocno_t *regno_top_level_allocno_map;
2568 /* Find the allocno that corresponds to A at a level one higher up in the
2569 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2570 ira_allocno_t
2571 ira_parent_allocno (ira_allocno_t a)
2573 ira_loop_tree_node_t parent;
2575 if (ALLOCNO_CAP (a) != NULL)
2576 return NULL;
2578 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2579 if (parent == NULL)
2580 return NULL;
2582 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2585 /* Find the allocno that corresponds to A at a level one higher up in the
2586 loop tree. If ALLOCNO_CAP is set for A, return that. */
2587 ira_allocno_t
2588 ira_parent_or_cap_allocno (ira_allocno_t a)
2590 if (ALLOCNO_CAP (a) != NULL)
2591 return ALLOCNO_CAP (a);
2593 return ira_parent_allocno (a);
2596 /* Process all allocnos originated from pseudo REGNO and copy live
2597 ranges, hard reg conflicts, and allocno stack reg attributes from
2598 low level allocnos to final allocnos which are destinations of
2599 removed stores at a loop exit. Return true if we copied live
2600 ranges. */
2601 static bool
2602 copy_info_to_removed_store_destinations (int regno)
2604 ira_allocno_t a;
2605 ira_allocno_t parent_a = NULL;
2606 ira_loop_tree_node_t parent;
2607 bool merged_p;
2609 merged_p = false;
2610 for (a = ira_regno_allocno_map[regno];
2611 a != NULL;
2612 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
/* Only allocnos chosen as the final representative of their pseudo
   are of interest here.  */
2614 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))])
2615 /* This allocno will be removed. */
2616 continue;
2618 /* Caps will be removed. */
2619 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
/* Walk up the loop tree for the nearest ancestor allocno of this
   regno that is itself a surviving, memory-optimized store
   destination; stop early on a region with no allocno for REGNO.  */
2620 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2621 parent != NULL;
2622 parent = parent->parent)
2623 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
2624 || (parent_a == regno_top_level_allocno_map[REGNO (ALLOCNO_REG
2625 (parent_a))]
2626 && ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)))
2627 break;
2628 if (parent == NULL || parent_a == NULL)
2629 continue;
/* Copy (not move) ranges and conflicts, and accumulate call info,
   into the store-destination allocno.  */
2631 copy_allocno_live_ranges (a, parent_a);
2632 merge_hard_reg_conflicts (a, parent_a, true);
2634 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2635 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2636 += ALLOCNO_CALLS_CROSSED_NUM (a);
2637 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2638 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2639 merged_p = true;
2641 return merged_p;
2644 /* Flatten the IR. In other words, this function transforms IR as if
2645 it were built with one region (without loops). We could make it
2646 much simpler by rebuilding IR with one region, but unfortunately it
2647 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
2648 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
2649 IRA_MAX_POINT before emitting insns on the loop borders. */
2650 void
2651 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
2653 int i, j;
2654 bool keep_p;
2655 int hard_regs_num;
2656 bool new_pseudos_p, merged_p, mem_dest_p;
2657 unsigned int n;
2658 enum reg_class cover_class;
2659 ira_allocno_t a, parent_a, first, second, node_first, node_second;
2660 ira_copy_t cp;
2661 ira_loop_tree_node_t node;
2662 live_range_t r;
2663 ira_allocno_iterator ai;
2664 ira_copy_iterator ci;
2666 regno_top_level_allocno_map
2667 = (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
2668 memset (regno_top_level_allocno_map, 0,
2669 max_reg_num () * sizeof (ira_allocno_t));
2670 new_pseudos_p = merged_p = false;
/* Phase 1: seed the accumulated ("total") conflict sets of every
   non-cap allocno from its own conflict sets.  */
2671 FOR_EACH_ALLOCNO (a, ai)
2673 ira_allocno_object_iterator oi;
2674 ira_object_t obj;
2675 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2676 /* Caps are not in the regno allocno maps and they will
2677 never be transformed into allocnos existing after IR
2678 flattening. */
2679 continue;
2680 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2681 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2682 OBJECT_CONFLICT_HARD_REGS (obj));
2683 #ifdef STACK_REGS
2684 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
2685 #endif
2687 /* Fix final allocno attributes. */
/* Phase 2: for each pseudo, pick its final top-level allocno and
   either merge child allocnos into their parent (same pseudo after
   emit) or, when emit renamed the pseudo, subtract the child's
   statistics from the whole ancestor chain.  */
2688 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2690 mem_dest_p = false;
2691 for (a = ira_regno_allocno_map[i];
2692 a != NULL;
2693 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2695 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2696 if (ALLOCNO_SOMEWHERE_RENAMED_P (a))
2697 new_pseudos_p = true;
2698 parent_a = ira_parent_allocno (a);
2699 if (parent_a == NULL)
2701 ALLOCNO_COPIES (a) = NULL;
2702 regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
2703 continue;
2705 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
2707 if (ALLOCNO_MEM_OPTIMIZED_DEST (a) != NULL)
2708 mem_dest_p = true;
/* Same pseudo in child and parent: fold the child into the
   parent allocno.  */
2709 if (REGNO (ALLOCNO_REG (a)) == REGNO (ALLOCNO_REG (parent_a)))
2711 merge_hard_reg_conflicts (a, parent_a, true);
2712 move_allocno_live_ranges (a, parent_a);
2713 merged_p = true;
2714 ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
2715 = (ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
2716 || ALLOCNO_MEM_OPTIMIZED_DEST_P (a));
2717 continue;
/* The child got a new pseudo: it survives on its own; remove its
   contribution from every ancestor's accumulated statistics.  */
2719 new_pseudos_p = true;
2720 for (;;)
2722 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
2723 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
2724 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
2725 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2726 -= ALLOCNO_CALLS_CROSSED_NUM (a);
2727 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2728 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2729 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
2730 && ALLOCNO_NREFS (parent_a) >= 0
2731 && ALLOCNO_FREQ (parent_a) >= 0);
2732 cover_class = ALLOCNO_COVER_CLASS (parent_a);
2733 hard_regs_num = ira_class_hard_regs_num[cover_class];
2734 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
2735 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
2736 for (j = 0; j < hard_regs_num; j++)
2737 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
2738 -= ALLOCNO_HARD_REG_COSTS (a)[j];
2739 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
2740 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
2741 for (j = 0; j < hard_regs_num; j++)
2742 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
2743 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
2744 ALLOCNO_COVER_CLASS_COST (parent_a)
2745 -= ALLOCNO_COVER_CLASS_COST (a);
2746 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
2747 parent_a = ira_parent_allocno (parent_a);
2748 if (parent_a == NULL)
2749 break;
2751 ALLOCNO_COPIES (a) = NULL;
2752 regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
2754 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
2755 merged_p = true;
2757 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
2758 if (merged_p || ira_max_point_before_emit != ira_max_point)
2759 ira_rebuild_start_finish_chains ();
/* Phase 3: if emit created new pseudos, the old conflict graph is
   invalid -- rebuild it by sweeping live ranges over all program
   points with a sparse set of currently-live objects.  */
2760 if (new_pseudos_p)
2762 sparseset objects_live;
2764 /* Rebuild conflicts. */
2765 FOR_EACH_ALLOCNO (a, ai)
2767 ira_allocno_object_iterator oi;
2768 ira_object_t obj;
2769 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2770 || ALLOCNO_CAP_MEMBER (a) != NULL)
2771 continue;
2772 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2774 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2775 ira_assert (r->object == obj);
2776 clear_conflicts (obj);
2779 objects_live = sparseset_alloc (ira_objects_num);
2780 for (i = 0; i < ira_max_point; i++)
2782 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
2784 ira_object_t obj = r->object;
2785 a = OBJECT_ALLOCNO (obj);
2786 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2787 || ALLOCNO_CAP_MEMBER (a) != NULL)
2788 continue;
2790 cover_class = ALLOCNO_COVER_CLASS (a);
2791 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
2792 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
2794 ira_object_t live_obj = ira_object_id_map[n];
2795 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
2796 enum reg_class live_cover = ALLOCNO_COVER_CLASS (live_a);
2797 if (ira_reg_classes_intersect_p[cover_class][live_cover]
2798 /* Don't set up conflict for the allocno with itself. */
2799 && live_a != a)
2800 ira_add_conflict (obj, live_obj);
2804 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
2805 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
2807 sparseset_free (objects_live);
2808 compress_conflict_vecs ();
2810 /* Mark some copies for removing and change allocnos in the rest
2811 copies. */
/* Phase 4: redirect surviving copies to the top-level allocnos and
   drop copies involving caps or stale pseudos.  */
2812 FOR_EACH_COPY (cp, ci)
2814 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
2815 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
2817 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2818 fprintf
2819 (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
2820 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
2821 ALLOCNO_NUM (cp->first), REGNO (ALLOCNO_REG (cp->first)),
2822 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
2823 ALLOCNO_NUM (cp->second), REGNO (ALLOCNO_REG (cp->second)));
2824 cp->loop_tree_node = NULL;
2825 continue;
2827 first = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->first))];
2828 second = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->second))];
2829 node = cp->loop_tree_node;
2830 if (node == NULL)
2831 keep_p = true; /* A copy generated in ira-emit.c. */
2832 else
2834 /* Check that the copy was not propagated from level on
2835 which we will have different pseudos. */
2836 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
2837 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
2838 keep_p = ((REGNO (ALLOCNO_REG (first))
2839 == REGNO (ALLOCNO_REG (node_first)))
2840 && (REGNO (ALLOCNO_REG (second))
2841 == REGNO (ALLOCNO_REG (node_second))));
2843 if (keep_p)
2845 cp->loop_tree_node = ira_loop_tree_root;
2846 cp->first = first;
2847 cp->second = second;
2849 else
2851 cp->loop_tree_node = NULL;
2852 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2853 fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
2854 cp->num, ALLOCNO_NUM (cp->first),
2855 REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
2856 REGNO (ALLOCNO_REG (cp->second)));
2859 /* Remove unnecessary allocnos on lower levels of the loop tree. */
/* Phase 5: destroy non-top-level allocnos and caps; re-home the
   survivors to the root node and reset their updated costs.  */
2860 FOR_EACH_ALLOCNO (a, ai)
2862 if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
2863 || ALLOCNO_CAP_MEMBER (a) != NULL)
2865 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2866 fprintf (ira_dump_file, " Remove a%dr%d\n",
2867 ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
2868 finish_allocno (a);
2869 continue;
2871 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2872 ALLOCNO_REGNO (a) = REGNO (ALLOCNO_REG (a));
2873 ALLOCNO_CAP (a) = NULL;
2874 /* Restore updated costs for assignments from reload. */
2875 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
2876 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
2877 if (! ALLOCNO_ASSIGNED_P (a))
2878 ira_free_allocno_updated_costs (a);
2879 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2880 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2882 /* Remove unnecessary copies. */
2883 FOR_EACH_COPY (cp, ci)
2885 if (cp->loop_tree_node == NULL)
2887 ira_copies[cp->num] = NULL;
2888 finish_copy (cp);
2889 continue;
2891 ira_assert
2892 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
2893 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
2894 ira_add_allocno_copy_to_list (cp);
2895 ira_swap_allocno_copy_ends_if_necessary (cp);
2897 rebuild_regno_allocno_maps ();
2898 if (ira_max_point != ira_max_point_before_emit)
2899 ira_compress_allocno_live_ranges ();
2900 ira_free (regno_top_level_allocno_map);
2905 #ifdef ENABLE_IRA_CHECKING
2906 /* Check creation of all allocnos. Allocnos on lower levels should
2907 have allocnos or caps on all upper levels. */
2908 static void
2909 check_allocno_creation (void)
2911 ira_allocno_t a;
2912 ira_allocno_iterator ai;
2913 ira_loop_tree_node_t loop_tree_node;
2915 FOR_EACH_ALLOCNO (a, ai)
2917 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
/* Every allocno must be registered in its node's all_allocnos set.  */
2918 ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
2919 ALLOCNO_NUM (a)));
2920 if (loop_tree_node == ira_loop_tree_root)
2921 continue;
2922 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2923 ira_assert (ALLOCNO_CAP (a) != NULL);
/* A non-cap allocno without a cap must be a border allocno with a
   representative in the parent region.  */
2924 else if (ALLOCNO_CAP (a) == NULL)
2925 ira_assert (loop_tree_node->parent
2926 ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
2927 && bitmap_bit_p (loop_tree_node->border_allocnos,
2928 ALLOCNO_NUM (a)));
2931 #endif
2933 /* Identify allocnos which prefer a register class with a single hard register.
2934 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
2935 less likely to use the preferred singleton register. */
2936 static void
2937 update_conflict_hard_reg_costs (void)
2939 ira_allocno_t a;
2940 ira_allocno_iterator ai;
2941 int i, index, min;
2943 FOR_EACH_ALLOCNO (a, ai)
2945 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
2946 enum reg_class pref = reg_preferred_class (ALLOCNO_REGNO (a));
/* Only singleton preferred classes are of interest.  */
2948 if (reg_class_size[pref] != 1)
2949 continue;
/* Position of the single preferred hard register within the cover
   class; negative means it is not in the cover class.  */
2950 index = (ira_class_hard_reg_index[cover_class]
2951 [ira_class_hard_regs[pref][0]]);
2952 if (index < 0)
2953 continue;
2954 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
2955 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
2956 continue;
/* Find the smallest per-register cost that still exceeds the cover
   class cost, i.e. the cheapest "second choice" register.  */
2957 min = INT_MAX;
2958 for (i = ira_class_hard_regs_num[cover_class] - 1; i >= 0; i--)
2959 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_COVER_CLASS_COST (a)
2960 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
2961 min = ALLOCNO_HARD_REG_COSTS (a)[i];
2962 if (min == INT_MAX)
2963 continue;
/* Bias conflicting allocnos away from the singleton register by the
   cost gap between it and the cheapest alternative.  */
2964 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2965 cover_class, 0);
2966 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
2967 -= min - ALLOCNO_COVER_CLASS_COST (a);
2971 /* Create an internal representation (IR) for IRA (allocnos, copies,
2972 loop tree nodes). If LOOPS_P is FALSE the nodes corresponding to
2973 the loops (except the root which corresponds the all function) and
2974 correspondingly allocnos for the loops will be not created. Such
2975 parameter value is used for Chaitin-Briggs coloring. The function
2976 returns TRUE if we generate loop structure (besides nodes
2977 representing all function and the basic blocks) for regional
2978 allocation. A true return means that we really need to flatten IR
2979 before the reload. */
2980 bool
2981 ira_build (bool loops_p)
2983 df_analyze ();
/* Build the loop tree, allocnos, costs and live ranges.  */
2985 initiate_cost_vectors ();
2986 initiate_allocnos ();
2987 initiate_copies ();
2988 create_loop_tree_nodes (loops_p);
2989 form_loop_tree ();
2990 create_allocnos ();
2991 ira_costs ();
2992 create_allocno_objects ();
2993 ira_create_allocno_live_ranges ();
/* Drop loops whose separate allocation cannot pay off, now that the
   costs needed to decide are known.  */
2994 remove_unnecessary_regions (false);
2995 ira_compress_allocno_live_ranges ();
2996 update_bad_spill_attribute ();
2997 loops_p = more_one_region_p ();
2998 if (loops_p)
3000 propagate_allocno_info ();
3001 create_caps ();
3003 ira_tune_allocno_costs_and_cover_classes ();
3004 #ifdef ENABLE_IRA_CHECKING
3005 check_allocno_creation ();
3006 #endif
/* Build the conflict graph and the auxiliary conflict-id ordering.  */
3007 setup_min_max_allocno_live_range_point ();
3008 sort_conflict_id_map ();
3009 setup_min_max_conflict_allocno_ids ();
3010 ira_build_conflicts ();
3011 update_conflict_hard_reg_costs ();
3012 if (! ira_conflicts_p)
3014 ira_allocno_t a;
3015 ira_allocno_iterator ai;
3017 /* Remove all regions but root one. */
3018 if (loops_p)
3020 remove_unnecessary_regions (true);
3021 loops_p = false;
3023 /* We don't save hard registers around calls for fast allocation
3024 -- add caller clobbered registers as conflicting ones to
3025 allocno crossing calls. */
3026 FOR_EACH_ALLOCNO (a, ai)
3027 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3028 ior_hard_reg_conflicts (a, &call_used_reg_set);
3030 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3031 print_copies (ira_dump_file);
/* Dump summary statistics about the constructed IR.  */
3032 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3034 int n, nr, nr_big;
3035 ira_allocno_t a;
3036 live_range_t r;
3037 ira_allocno_iterator ai;
3039 n = 0;
3040 nr = 0;
3041 nr_big = 0;
3042 FOR_EACH_ALLOCNO (a, ai)
3044 int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3045 if (nobj > 1)
3046 nr_big++;
3047 for (j = 0; j < nobj; j++)
3049 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3050 n += OBJECT_NUM_CONFLICTS (obj);
3051 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3052 nr++;
3055 fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
3056 VEC_length (loop_p, ira_loops.larray), n_basic_blocks,
3057 ira_max_point);
3058 fprintf (ira_dump_file,
3059 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3060 ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3062 return loops_p;
/* Release all the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}