PR tree-optimization/45830
[official-gcc.git] / gcc / ira-build.c
blob2d8708a4e65e08ed4562a71672963a37a3353bb6
1 /* Building internal representation for IRA.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "hard-reg-set.h"
32 #include "basic-block.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "params.h"
37 #include "df.h"
38 #include "output.h"
39 #include "reload.h"
40 #include "sparseset.h"
41 #include "ira-int.h"
42 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
44 static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
45 ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
96 /* The following function allocates the loop tree nodes. If LOOPS_P
97 is FALSE, the nodes corresponding to the loops (except the root
98 which corresponds the all function) will be not allocated but nodes
99 will still be allocated for basic blocks. */
100 static void
101 create_loop_tree_nodes (bool loops_p)
103 unsigned int i, j;
104 int max_regno;
105 bool skip_p;
106 edge_iterator ei;
107 edge e;
108 VEC (edge, heap) *edges;
109 loop_p loop;
111 ira_bb_nodes
112 = ((struct ira_loop_tree_node *)
113 ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
114 last_basic_block_before_change = last_basic_block;
115 for (i = 0; i < (unsigned int) last_basic_block; i++)
117 ira_bb_nodes[i].regno_allocno_map = NULL;
118 memset (ira_bb_nodes[i].reg_pressure, 0,
119 sizeof (ira_bb_nodes[i].reg_pressure));
120 ira_bb_nodes[i].all_allocnos = NULL;
121 ira_bb_nodes[i].modified_regnos = NULL;
122 ira_bb_nodes[i].border_allocnos = NULL;
123 ira_bb_nodes[i].local_copies = NULL;
125 ira_loop_nodes = ((struct ira_loop_tree_node *)
126 ira_allocate (sizeof (struct ira_loop_tree_node)
127 * VEC_length (loop_p, ira_loops.larray)));
128 max_regno = max_reg_num ();
129 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
131 if (loop != ira_loops.tree_root)
133 ira_loop_nodes[i].regno_allocno_map = NULL;
134 if (! loops_p)
135 continue;
136 skip_p = false;
137 FOR_EACH_EDGE (e, ei, loop->header->preds)
138 if (e->src != loop->latch
139 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
141 skip_p = true;
142 break;
144 if (skip_p)
145 continue;
146 edges = get_loop_exit_edges (loop);
147 FOR_EACH_VEC_ELT (edge, edges, j, e)
148 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
150 skip_p = true;
151 break;
153 VEC_free (edge, heap, edges);
154 if (skip_p)
155 continue;
157 ira_loop_nodes[i].regno_allocno_map
158 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
159 memset (ira_loop_nodes[i].regno_allocno_map, 0,
160 sizeof (ira_allocno_t) * max_regno);
161 memset (ira_loop_nodes[i].reg_pressure, 0,
162 sizeof (ira_loop_nodes[i].reg_pressure));
163 ira_loop_nodes[i].all_allocnos = ira_allocate_bitmap ();
164 ira_loop_nodes[i].modified_regnos = ira_allocate_bitmap ();
165 ira_loop_nodes[i].border_allocnos = ira_allocate_bitmap ();
166 ira_loop_nodes[i].local_copies = ira_allocate_bitmap ();
170 /* The function returns TRUE if there are more one allocation
171 region. */
172 static bool
173 more_one_region_p (void)
175 unsigned int i;
176 loop_p loop;
178 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
179 if (ira_loop_nodes[i].regno_allocno_map != NULL
180 && ira_loop_tree_root != &ira_loop_nodes[i])
181 return true;
182 return false;
185 /* Free the loop tree node of a loop. */
186 static void
187 finish_loop_tree_node (ira_loop_tree_node_t loop)
189 if (loop->regno_allocno_map != NULL)
191 ira_assert (loop->bb == NULL);
192 ira_free_bitmap (loop->local_copies);
193 ira_free_bitmap (loop->border_allocnos);
194 ira_free_bitmap (loop->modified_regnos);
195 ira_free_bitmap (loop->all_allocnos);
196 ira_free (loop->regno_allocno_map);
197 loop->regno_allocno_map = NULL;
201 /* Free the loop tree nodes. */
202 static void
203 finish_loop_tree_nodes (void)
205 unsigned int i;
206 loop_p loop;
208 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
209 finish_loop_tree_node (&ira_loop_nodes[i]);
210 ira_free (ira_loop_nodes);
211 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
213 if (ira_bb_nodes[i].local_copies != NULL)
214 ira_free_bitmap (ira_bb_nodes[i].local_copies);
215 if (ira_bb_nodes[i].border_allocnos != NULL)
216 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
217 if (ira_bb_nodes[i].modified_regnos != NULL)
218 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
219 if (ira_bb_nodes[i].all_allocnos != NULL)
220 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
221 if (ira_bb_nodes[i].regno_allocno_map != NULL)
222 ira_free (ira_bb_nodes[i].regno_allocno_map);
224 ira_free (ira_bb_nodes);
229 /* The following recursive function adds LOOP to the loop tree
230 hierarchy. LOOP is added only once. */
231 static void
232 add_loop_to_tree (struct loop *loop)
234 struct loop *parent;
235 ira_loop_tree_node_t loop_node, parent_node;
237 /* We can not use loop node access macros here because of potential
238 checking and because the nodes are not initialized enough
239 yet. */
240 if (loop_outer (loop) != NULL)
241 add_loop_to_tree (loop_outer (loop));
242 if (ira_loop_nodes[loop->num].regno_allocno_map != NULL
243 && ira_loop_nodes[loop->num].children == NULL)
245 /* We have not added loop node to the tree yet. */
246 loop_node = &ira_loop_nodes[loop->num];
247 loop_node->loop = loop;
248 loop_node->bb = NULL;
249 for (parent = loop_outer (loop);
250 parent != NULL;
251 parent = loop_outer (parent))
252 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
253 break;
254 if (parent == NULL)
256 loop_node->next = NULL;
257 loop_node->subloop_next = NULL;
258 loop_node->parent = NULL;
260 else
262 parent_node = &ira_loop_nodes[parent->num];
263 loop_node->next = parent_node->children;
264 parent_node->children = loop_node;
265 loop_node->subloop_next = parent_node->subloops;
266 parent_node->subloops = loop_node;
267 loop_node->parent = parent_node;
272 /* The following recursive function sets up levels of nodes of the
273 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
274 The function returns maximal value of level in the tree + 1. */
275 static int
276 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
278 int height, max_height;
279 ira_loop_tree_node_t subloop_node;
281 ira_assert (loop_node->bb == NULL);
282 loop_node->level = level;
283 max_height = level + 1;
284 for (subloop_node = loop_node->subloops;
285 subloop_node != NULL;
286 subloop_node = subloop_node->subloop_next)
288 ira_assert (subloop_node->bb == NULL);
289 height = setup_loop_tree_level (subloop_node, level + 1);
290 if (height > max_height)
291 max_height = height;
293 return max_height;
296 /* Create the loop tree. The algorithm is designed to provide correct
297 order of loops (they are ordered by their last loop BB) and basic
298 blocks in the chain formed by member next. */
299 static void
300 form_loop_tree (void)
302 unsigned int i;
303 basic_block bb;
304 struct loop *parent;
305 ira_loop_tree_node_t bb_node, loop_node;
306 loop_p loop;
308 /* We can not use loop/bb node access macros because of potential
309 checking and because the nodes are not initialized enough
310 yet. */
311 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
312 if (ira_loop_nodes[i].regno_allocno_map != NULL)
314 ira_loop_nodes[i].children = NULL;
315 ira_loop_nodes[i].subloops = NULL;
317 FOR_EACH_BB (bb)
319 bb_node = &ira_bb_nodes[bb->index];
320 bb_node->bb = bb;
321 bb_node->loop = NULL;
322 bb_node->subloops = NULL;
323 bb_node->children = NULL;
324 bb_node->subloop_next = NULL;
325 bb_node->next = NULL;
326 for (parent = bb->loop_father;
327 parent != NULL;
328 parent = loop_outer (parent))
329 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
330 break;
331 add_loop_to_tree (parent);
332 loop_node = &ira_loop_nodes[parent->num];
333 bb_node->next = loop_node->children;
334 bb_node->parent = loop_node;
335 loop_node->children = bb_node;
337 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (ira_loops.tree_root->num);
338 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
339 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
344 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
345 tree nodes. */
346 static void
347 rebuild_regno_allocno_maps (void)
349 unsigned int l;
350 int max_regno, regno;
351 ira_allocno_t a;
352 ira_loop_tree_node_t loop_tree_node;
353 loop_p loop;
354 ira_allocno_iterator ai;
356 max_regno = max_reg_num ();
357 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, l, loop)
358 if (ira_loop_nodes[l].regno_allocno_map != NULL)
360 ira_free (ira_loop_nodes[l].regno_allocno_map);
361 ira_loop_nodes[l].regno_allocno_map
362 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
363 * max_regno);
364 memset (ira_loop_nodes[l].regno_allocno_map, 0,
365 sizeof (ira_allocno_t) * max_regno);
367 ira_free (ira_regno_allocno_map);
368 ira_regno_allocno_map
369 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
370 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
371 FOR_EACH_ALLOCNO (a, ai)
373 if (ALLOCNO_CAP_MEMBER (a) != NULL)
374 /* Caps are not in the regno allocno maps. */
375 continue;
376 regno = ALLOCNO_REGNO (a);
377 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
378 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
379 ira_regno_allocno_map[regno] = a;
380 if (loop_tree_node->regno_allocno_map[regno] == NULL)
381 /* Remember that we can create temporary allocnos to break
382 cycles in register shuffle. */
383 loop_tree_node->regno_allocno_map[regno] = a;
388 /* Pools for allocnos, allocno live ranges and objects. */
389 static alloc_pool allocno_pool, live_range_pool, object_pool;
391 /* Vec containing references to all created allocnos. It is a
392 container of array allocnos. */
393 static VEC(ira_allocno_t,heap) *allocno_vec;
395 /* Vec containing references to all created ira_objects. It is a
396 container of ira_object_id_map. */
397 static VEC(ira_object_t,heap) *ira_object_id_map_vec;
399 /* Initialize data concerning allocnos. */
400 static void
401 initiate_allocnos (void)
403 live_range_pool
404 = create_alloc_pool ("live ranges",
405 sizeof (struct live_range), 100);
406 allocno_pool
407 = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
408 object_pool
409 = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
410 allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
411 ira_allocnos = NULL;
412 ira_allocnos_num = 0;
413 ira_objects_num = 0;
414 ira_object_id_map_vec
415 = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
416 ira_object_id_map = NULL;
417 ira_regno_allocno_map
418 = (ira_allocno_t *) ira_allocate (max_reg_num ()
419 * sizeof (ira_allocno_t));
420 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
423 /* Create and return an object corresponding to a new allocno A. */
424 static ira_object_t
425 ira_create_object (ira_allocno_t a, int subword)
427 enum reg_class aclass = ALLOCNO_CLASS (a);
428 ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
430 OBJECT_ALLOCNO (obj) = a;
431 OBJECT_SUBWORD (obj) = subword;
432 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
433 OBJECT_CONFLICT_VEC_P (obj) = false;
434 OBJECT_CONFLICT_ARRAY (obj) = NULL;
435 OBJECT_NUM_CONFLICTS (obj) = 0;
436 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
437 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
438 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
439 reg_class_contents[aclass]);
440 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
441 reg_class_contents[aclass]);
442 OBJECT_MIN (obj) = INT_MAX;
443 OBJECT_MAX (obj) = -1;
444 OBJECT_LIVE_RANGES (obj) = NULL;
446 VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
447 ira_object_id_map
448 = VEC_address (ira_object_t, ira_object_id_map_vec);
449 ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);
451 return obj;
454 /* Create and return the allocno corresponding to REGNO in
455 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
456 same regno if CAP_P is FALSE. */
457 ira_allocno_t
458 ira_create_allocno (int regno, bool cap_p,
459 ira_loop_tree_node_t loop_tree_node)
461 ira_allocno_t a;
463 a = (ira_allocno_t) pool_alloc (allocno_pool);
464 ALLOCNO_REGNO (a) = regno;
465 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
466 if (! cap_p)
468 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
469 ira_regno_allocno_map[regno] = a;
470 if (loop_tree_node->regno_allocno_map[regno] == NULL)
471 /* Remember that we can create temporary allocnos to break
472 cycles in register shuffle on region borders (see
473 ira-emit.c). */
474 loop_tree_node->regno_allocno_map[regno] = a;
476 ALLOCNO_CAP (a) = NULL;
477 ALLOCNO_CAP_MEMBER (a) = NULL;
478 ALLOCNO_NUM (a) = ira_allocnos_num;
479 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
480 ALLOCNO_NREFS (a) = 0;
481 ALLOCNO_FREQ (a) = 0;
482 ALLOCNO_HARD_REGNO (a) = -1;
483 ALLOCNO_CALL_FREQ (a) = 0;
484 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
485 #ifdef STACK_REGS
486 ALLOCNO_NO_STACK_REG_P (a) = false;
487 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
488 #endif
489 ALLOCNO_DONT_REASSIGN_P (a) = false;
490 ALLOCNO_BAD_SPILL_P (a) = false;
491 ALLOCNO_ASSIGNED_P (a) = false;
492 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
493 ALLOCNO_COPIES (a) = NULL;
494 ALLOCNO_HARD_REG_COSTS (a) = NULL;
495 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
496 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
497 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
498 ALLOCNO_CLASS (a) = NO_REGS;
499 ALLOCNO_UPDATED_CLASS_COST (a) = 0;
500 ALLOCNO_CLASS_COST (a) = 0;
501 ALLOCNO_MEMORY_COST (a) = 0;
502 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
503 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
504 ALLOCNO_NUM_OBJECTS (a) = 0;
506 ALLOCNO_ADD_DATA (a) = NULL;
507 VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
508 ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
509 ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
511 return a;
514 /* Set up register class for A and update its conflict hard
515 registers. */
516 void
517 ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
519 ira_allocno_object_iterator oi;
520 ira_object_t obj;
522 ALLOCNO_CLASS (a) = aclass;
523 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
525 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
526 reg_class_contents[aclass]);
527 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
528 reg_class_contents[aclass]);
532 /* Determine the number of objects we should associate with allocno A
533 and allocate them. */
534 void
535 ira_create_allocno_objects (ira_allocno_t a)
537 enum machine_mode mode = ALLOCNO_MODE (a);
538 enum reg_class aclass = ALLOCNO_CLASS (a);
539 int n = ira_reg_class_max_nregs[aclass][mode];
540 int i;
542 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
543 n = 1;
545 ALLOCNO_NUM_OBJECTS (a) = n;
546 for (i = 0; i < n; i++)
547 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
550 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
551 ALLOCNO_OBJECT structures. This must be called after the allocno
552 classes are known. */
553 static void
554 create_allocno_objects (void)
556 ira_allocno_t a;
557 ira_allocno_iterator ai;
559 FOR_EACH_ALLOCNO (a, ai)
560 ira_create_allocno_objects (a);
563 /* Merge hard register conflict information for all objects associated with
564 allocno TO into the corresponding objects associated with FROM.
565 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
566 static void
567 merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
568 bool total_only)
570 int i;
571 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
572 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
574 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
575 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
577 if (!total_only)
578 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
579 OBJECT_CONFLICT_HARD_REGS (from_obj));
580 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
581 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
583 #ifdef STACK_REGS
584 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
585 ALLOCNO_NO_STACK_REG_P (to) = true;
586 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
587 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
588 #endif
591 /* Update hard register conflict information for all objects associated with
592 A to include the regs in SET. */
593 void
594 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
596 ira_allocno_object_iterator i;
597 ira_object_t obj;
599 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
601 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
602 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
606 /* Return TRUE if a conflict vector with NUM elements is more
607 profitable than a conflict bit vector for OBJ. */
608 bool
609 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
611 int nw;
612 int max = OBJECT_MAX (obj);
613 int min = OBJECT_MIN (obj);
615 if (max < min)
616 /* We prefer a bit vector in such case because it does not result
617 in allocation. */
618 return false;
620 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
621 return (2 * sizeof (ira_object_t) * (num + 1)
622 < 3 * nw * sizeof (IRA_INT_TYPE));
625 /* Allocates and initialize the conflict vector of OBJ for NUM
626 conflicting objects. */
627 void
628 ira_allocate_conflict_vec (ira_object_t obj, int num)
630 int size;
631 ira_object_t *vec;
633 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
634 num++; /* for NULL end marker */
635 size = sizeof (ira_object_t) * num;
636 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
637 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
638 vec[0] = NULL;
639 OBJECT_NUM_CONFLICTS (obj) = 0;
640 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
641 OBJECT_CONFLICT_VEC_P (obj) = true;
644 /* Allocate and initialize the conflict bit vector of OBJ. */
645 static void
646 allocate_conflict_bit_vec (ira_object_t obj)
648 unsigned int size;
650 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
651 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
652 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
653 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
654 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
655 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
656 OBJECT_CONFLICT_VEC_P (obj) = false;
659 /* Allocate and initialize the conflict vector or conflict bit vector
660 of OBJ for NUM conflicting allocnos whatever is more profitable. */
661 void
662 ira_allocate_object_conflicts (ira_object_t obj, int num)
664 if (ira_conflict_vector_profitable_p (obj, num))
665 ira_allocate_conflict_vec (obj, num);
666 else
667 allocate_conflict_bit_vec (obj);
670 /* Add OBJ2 to the conflicts of OBJ1. */
671 static void
672 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
674 int num;
675 unsigned int size;
677 if (OBJECT_CONFLICT_VEC_P (obj1))
679 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
680 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
681 num = curr_num + 2;
682 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
684 ira_object_t *newvec;
685 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
686 newvec = (ira_object_t *) ira_allocate (size);
687 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
688 ira_free (vec);
689 vec = newvec;
690 OBJECT_CONFLICT_ARRAY (obj1) = vec;
691 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
693 vec[num - 2] = obj2;
694 vec[num - 1] = NULL;
695 OBJECT_NUM_CONFLICTS (obj1)++;
697 else
699 int nw, added_head_nw, id;
700 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
702 id = OBJECT_CONFLICT_ID (obj2);
703 if (OBJECT_MIN (obj1) > id)
705 /* Expand head of the bit vector. */
706 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
707 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
708 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
709 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
711 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
712 vec, nw * sizeof (IRA_INT_TYPE));
713 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
715 else
717 size
718 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
719 vec = (IRA_INT_TYPE *) ira_allocate (size);
720 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
721 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
722 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
723 memset ((char *) vec
724 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
725 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
726 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
727 OBJECT_CONFLICT_ARRAY (obj1) = vec;
728 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
730 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
732 else if (OBJECT_MAX (obj1) < id)
734 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
735 size = nw * sizeof (IRA_INT_TYPE);
736 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
738 /* Expand tail of the bit vector. */
739 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
740 vec = (IRA_INT_TYPE *) ira_allocate (size);
741 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
742 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
743 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
744 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
745 OBJECT_CONFLICT_ARRAY (obj1) = vec;
746 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
748 OBJECT_MAX (obj1) = id;
750 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
754 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
755 static void
756 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
758 add_to_conflicts (obj1, obj2);
759 add_to_conflicts (obj2, obj1);
762 /* Clear all conflicts of OBJ. */
763 static void
764 clear_conflicts (ira_object_t obj)
766 if (OBJECT_CONFLICT_VEC_P (obj))
768 OBJECT_NUM_CONFLICTS (obj) = 0;
769 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
771 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
773 int nw;
775 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
776 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
780 /* The array used to find duplications in conflict vectors of
781 allocnos. */
782 static int *conflict_check;
784 /* The value used to mark allocation presence in conflict vector of
785 the current allocno. */
786 static int curr_conflict_check_tick;
788 /* Remove duplications in conflict vector of OBJ. */
789 static void
790 compress_conflict_vec (ira_object_t obj)
792 ira_object_t *vec, conflict_obj;
793 int i, j;
795 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
796 vec = OBJECT_CONFLICT_VEC (obj);
797 curr_conflict_check_tick++;
798 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
800 int id = OBJECT_CONFLICT_ID (conflict_obj);
801 if (conflict_check[id] != curr_conflict_check_tick)
803 conflict_check[id] = curr_conflict_check_tick;
804 vec[j++] = conflict_obj;
807 OBJECT_NUM_CONFLICTS (obj) = j;
808 vec[j] = NULL;
811 /* Remove duplications in conflict vectors of all allocnos. */
812 static void
813 compress_conflict_vecs (void)
815 ira_object_t obj;
816 ira_object_iterator oi;
818 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
819 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
820 curr_conflict_check_tick = 0;
821 FOR_EACH_OBJECT (obj, oi)
823 if (OBJECT_CONFLICT_VEC_P (obj))
824 compress_conflict_vec (obj);
826 ira_free (conflict_check);
829 /* This recursive function outputs allocno A and if it is a cap the
830 function outputs its members. */
831 void
832 ira_print_expanded_allocno (ira_allocno_t a)
834 basic_block bb;
836 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
837 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
838 fprintf (ira_dump_file, ",b%d", bb->index);
839 else
840 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
841 if (ALLOCNO_CAP_MEMBER (a) != NULL)
843 fprintf (ira_dump_file, ":");
844 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
846 fprintf (ira_dump_file, ")");
849 /* Create and return the cap representing allocno A in the
850 parent loop. */
851 static ira_allocno_t
852 create_cap_allocno (ira_allocno_t a)
854 ira_allocno_t cap;
855 ira_loop_tree_node_t parent;
856 enum reg_class aclass;
858 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
859 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
860 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
861 aclass = ALLOCNO_CLASS (a);
862 ira_set_allocno_class (cap, aclass);
863 ira_create_allocno_objects (cap);
864 ALLOCNO_CAP_MEMBER (cap) = a;
865 ALLOCNO_CAP (a) = cap;
866 ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
867 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
868 ira_allocate_and_copy_costs
869 (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
870 ira_allocate_and_copy_costs
871 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
872 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
873 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
874 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
875 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
876 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
878 merge_hard_reg_conflicts (a, cap, false);
880 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
881 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
883 fprintf (ira_dump_file, " Creating cap ");
884 ira_print_expanded_allocno (cap);
885 fprintf (ira_dump_file, "\n");
887 return cap;
890 /* Create and return a live range for OBJECT with given attributes. */
891 live_range_t
892 ira_create_live_range (ira_object_t obj, int start, int finish,
893 live_range_t next)
895 live_range_t p;
897 p = (live_range_t) pool_alloc (live_range_pool);
898 p->object = obj;
899 p->start = start;
900 p->finish = finish;
901 p->next = next;
902 return p;
905 /* Create a new live range for OBJECT and queue it at the head of its
906 live range list. */
907 void
908 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
910 live_range_t p;
911 p = ira_create_live_range (object, start, finish,
912 OBJECT_LIVE_RANGES (object));
913 OBJECT_LIVE_RANGES (object) = p;
916 /* Copy allocno live range R and return the result. */
917 static live_range_t
918 copy_live_range (live_range_t r)
920 live_range_t p;
922 p = (live_range_t) pool_alloc (live_range_pool);
923 *p = *r;
924 return p;
927 /* Copy allocno live range list given by its head R and return the
928 result. */
929 live_range_t
930 ira_copy_live_range_list (live_range_t r)
932 live_range_t p, first, last;
934 if (r == NULL)
935 return NULL;
936 for (first = last = NULL; r != NULL; r = r->next)
938 p = copy_live_range (r);
939 if (first == NULL)
940 first = p;
941 else
942 last->next = p;
943 last = p;
945 return first;
948 /* Merge ranges R1 and R2 and returns the result. The function
949 maintains the order of ranges and tries to minimize number of the
950 result ranges. */
951 live_range_t
952 ira_merge_live_ranges (live_range_t r1, live_range_t r2)
954 live_range_t first, last, temp;
956 if (r1 == NULL)
957 return r2;
958 if (r2 == NULL)
959 return r1;
960 for (first = last = NULL; r1 != NULL && r2 != NULL;)
962 if (r1->start < r2->start)
964 temp = r1;
965 r1 = r2;
966 r2 = temp;
968 if (r1->start <= r2->finish + 1)
970 /* Intersected ranges: merge r1 and r2 into r1. */
971 r1->start = r2->start;
972 if (r1->finish < r2->finish)
973 r1->finish = r2->finish;
974 temp = r2;
975 r2 = r2->next;
976 ira_finish_live_range (temp);
977 if (r2 == NULL)
979 /* To try to merge with subsequent ranges in r1. */
980 r2 = r1->next;
981 r1->next = NULL;
984 else
986 /* Add r1 to the result. */
987 if (first == NULL)
988 first = last = r1;
989 else
991 last->next = r1;
992 last = r1;
994 r1 = r1->next;
995 if (r1 == NULL)
997 /* To try to merge with subsequent ranges in r2. */
998 r1 = r2->next;
999 r2->next = NULL;
1003 if (r1 != NULL)
1005 if (first == NULL)
1006 first = r1;
1007 else
1008 last->next = r1;
1009 ira_assert (r1->next == NULL);
1011 else if (r2 != NULL)
1013 if (first == NULL)
1014 first = r2;
1015 else
1016 last->next = r2;
1017 ira_assert (r2->next == NULL);
1019 else
1021 ira_assert (last->next == NULL);
1023 return first;
1026 /* Return TRUE if live ranges R1 and R2 intersect. */
1027 bool
1028 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1030 /* Remember the live ranges are always kept ordered. */
1031 while (r1 != NULL && r2 != NULL)
1033 if (r1->start > r2->finish)
1034 r1 = r1->next;
1035 else if (r2->start > r1->finish)
1036 r2 = r2->next;
1037 else
1038 return true;
1040 return false;
1043 /* Free allocno live range R. */
1044 void
1045 ira_finish_live_range (live_range_t r)
1047 pool_free (live_range_pool, r);
1050 /* Free list of allocno live ranges starting with R. */
1051 void
1052 ira_finish_live_range_list (live_range_t r)
1054 live_range_t next_r;
1056 for (; r != NULL; r = next_r)
1058 next_r = r->next;
1059 ira_finish_live_range (r);
1063 /* Free updated register costs of allocno A. */
1064 void
1065 ira_free_allocno_updated_costs (ira_allocno_t a)
1067 enum reg_class aclass;
1069 aclass = ALLOCNO_CLASS (a);
1070 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1071 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1072 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1073 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1074 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1075 aclass);
1076 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1079 /* Free and nullify all cost vectors allocated earlier for allocno
1080 A. */
1081 static void
1082 ira_free_allocno_costs (ira_allocno_t a)
1084 enum reg_class aclass = ALLOCNO_CLASS (a);
1085 ira_object_t obj;
1086 ira_allocno_object_iterator oi;
1088 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1090 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1091 ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1092 if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1093 ira_free (OBJECT_CONFLICT_ARRAY (obj));
1094 pool_free (object_pool, obj);
1097 ira_allocnos[ALLOCNO_NUM (a)] = NULL;
1098 if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1099 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
1100 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1101 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
1102 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1103 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1104 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1105 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1106 aclass);
1107 ALLOCNO_HARD_REG_COSTS (a) = NULL;
1108 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
1109 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1110 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1113 /* Free the memory allocated for allocno A. */
1114 static void
1115 finish_allocno (ira_allocno_t a)
1117 ira_free_allocno_costs (a);
1118 pool_free (allocno_pool, a);
1121 /* Free the memory allocated for all allocnos. */
1122 static void
1123 finish_allocnos (void)
1125 ira_allocno_t a;
1126 ira_allocno_iterator ai;
1128 FOR_EACH_ALLOCNO (a, ai)
1129 finish_allocno (a);
1130 ira_free (ira_regno_allocno_map);
1131 VEC_free (ira_object_t, heap, ira_object_id_map_vec);
1132 VEC_free (ira_allocno_t, heap, allocno_vec);
1133 free_alloc_pool (allocno_pool);
1134 free_alloc_pool (object_pool);
1135 free_alloc_pool (live_range_pool);
1140 /* Pools for copies. */
1141 static alloc_pool copy_pool;
1143 /* Vec containing references to all created copies. It is a
1144 container of array ira_copies. */
1145 static VEC(ira_copy_t,heap) *copy_vec;
1147 /* The function initializes data concerning allocno copies. */
1148 static void
1149 initiate_copies (void)
1151 copy_pool
1152 = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
1153 copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
1154 ira_copies = NULL;
1155 ira_copies_num = 0;
1158 /* Return copy connecting A1 and A2 and originated from INSN of
1159 LOOP_TREE_NODE if any. */
1160 static ira_copy_t
1161 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
1162 ira_loop_tree_node_t loop_tree_node)
1164 ira_copy_t cp, next_cp;
1165 ira_allocno_t another_a;
1167 for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1169 if (cp->first == a1)
1171 next_cp = cp->next_first_allocno_copy;
1172 another_a = cp->second;
1174 else if (cp->second == a1)
1176 next_cp = cp->next_second_allocno_copy;
1177 another_a = cp->first;
1179 else
1180 gcc_unreachable ();
1181 if (another_a == a2 && cp->insn == insn
1182 && cp->loop_tree_node == loop_tree_node)
1183 return cp;
1185 return NULL;
1188 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1189 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1190 ira_copy_t
1191 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1192 bool constraint_p, rtx insn,
1193 ira_loop_tree_node_t loop_tree_node)
1195 ira_copy_t cp;
1197 cp = (ira_copy_t) pool_alloc (copy_pool);
1198 cp->num = ira_copies_num;
1199 cp->first = first;
1200 cp->second = second;
1201 cp->freq = freq;
1202 cp->constraint_p = constraint_p;
1203 cp->insn = insn;
1204 cp->loop_tree_node = loop_tree_node;
1205 VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
1206 ira_copies = VEC_address (ira_copy_t, copy_vec);
1207 ira_copies_num = VEC_length (ira_copy_t, copy_vec);
1208 return cp;
1211 /* Attach a copy CP to allocnos involved into the copy. */
1212 void
1213 ira_add_allocno_copy_to_list (ira_copy_t cp)
1215 ira_allocno_t first = cp->first, second = cp->second;
1217 cp->prev_first_allocno_copy = NULL;
1218 cp->prev_second_allocno_copy = NULL;
1219 cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
1220 if (cp->next_first_allocno_copy != NULL)
1222 if (cp->next_first_allocno_copy->first == first)
1223 cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
1224 else
1225 cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
1227 cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
1228 if (cp->next_second_allocno_copy != NULL)
1230 if (cp->next_second_allocno_copy->second == second)
1231 cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
1232 else
1233 cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
1235 ALLOCNO_COPIES (first) = cp;
1236 ALLOCNO_COPIES (second) = cp;
1239 /* Make a copy CP a canonical copy where number of the
1240 first allocno is less than the second one. */
1241 void
1242 ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1244 ira_allocno_t temp;
1245 ira_copy_t temp_cp;
1247 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1248 return;
1250 temp = cp->first;
1251 cp->first = cp->second;
1252 cp->second = temp;
1254 temp_cp = cp->prev_first_allocno_copy;
1255 cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
1256 cp->prev_second_allocno_copy = temp_cp;
1258 temp_cp = cp->next_first_allocno_copy;
1259 cp->next_first_allocno_copy = cp->next_second_allocno_copy;
1260 cp->next_second_allocno_copy = temp_cp;
1263 /* Create (or update frequency if the copy already exists) and return
1264 the copy of allocnos FIRST and SECOND with frequency FREQ
1265 corresponding to move insn INSN (if any) and originated from
1266 LOOP_TREE_NODE. */
1267 ira_copy_t
1268 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1269 bool constraint_p, rtx insn,
1270 ira_loop_tree_node_t loop_tree_node)
1272 ira_copy_t cp;
1274 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1276 cp->freq += freq;
1277 return cp;
1279 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1280 loop_tree_node);
1281 ira_assert (first != NULL && second != NULL);
1282 ira_add_allocno_copy_to_list (cp);
1283 ira_swap_allocno_copy_ends_if_necessary (cp);
1284 return cp;
1287 /* Print info about copy CP into file F. */
1288 static void
1289 print_copy (FILE *f, ira_copy_t cp)
1291 fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1292 ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1293 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1294 cp->insn != NULL
1295 ? "move" : cp->constraint_p ? "constraint" : "shuffle");
1298 /* Print info about copy CP into stderr. */
1299 void
1300 ira_debug_copy (ira_copy_t cp)
1302 print_copy (stderr, cp);
1305 /* Print info about all copies into file F. */
1306 static void
1307 print_copies (FILE *f)
1309 ira_copy_t cp;
1310 ira_copy_iterator ci;
1312 FOR_EACH_COPY (cp, ci)
1313 print_copy (f, cp);
1316 /* Print info about all copies into stderr. */
1317 void
1318 ira_debug_copies (void)
1320 print_copies (stderr);
1323 /* Print info about copies involving allocno A into file F. */
1324 static void
1325 print_allocno_copies (FILE *f, ira_allocno_t a)
1327 ira_allocno_t another_a;
1328 ira_copy_t cp, next_cp;
1330 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1331 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1333 if (cp->first == a)
1335 next_cp = cp->next_first_allocno_copy;
1336 another_a = cp->second;
1338 else if (cp->second == a)
1340 next_cp = cp->next_second_allocno_copy;
1341 another_a = cp->first;
1343 else
1344 gcc_unreachable ();
1345 fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1346 ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1348 fprintf (f, "\n");
1351 /* Print info about copies involving allocno A into stderr. */
1352 void
1353 ira_debug_allocno_copies (ira_allocno_t a)
1355 print_allocno_copies (stderr, a);
1358 /* The function frees memory allocated for copy CP. */
1359 static void
1360 finish_copy (ira_copy_t cp)
1362 pool_free (copy_pool, cp);
1366 /* Free memory allocated for all copies. */
1367 static void
1368 finish_copies (void)
1370 ira_copy_t cp;
1371 ira_copy_iterator ci;
1373 FOR_EACH_COPY (cp, ci)
1374 finish_copy (cp);
1375 VEC_free (ira_copy_t, heap, copy_vec);
1376 free_alloc_pool (copy_pool);
1381 /* Pools for cost vectors. It is defined only for allocno classes. */
1382 static alloc_pool cost_vector_pool[N_REG_CLASSES];
1384 /* The function initiates work with hard register cost vectors. It
1385 creates allocation pool for each allocno class. */
1386 static void
1387 initiate_cost_vectors (void)
1389 int i;
1390 enum reg_class aclass;
1392 for (i = 0; i < ira_allocno_classes_num; i++)
1394 aclass = ira_allocno_classes[i];
1395 cost_vector_pool[aclass]
1396 = create_alloc_pool ("cost vectors",
1397 sizeof (int) * ira_class_hard_regs_num[aclass],
1398 100);
1402 /* Allocate and return a cost vector VEC for ACLASS. */
1403 int *
1404 ira_allocate_cost_vector (reg_class_t aclass)
1406 return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
1409 /* Free a cost vector VEC for ACLASS. */
1410 void
1411 ira_free_cost_vector (int *vec, reg_class_t aclass)
1413 ira_assert (vec != NULL);
1414 pool_free (cost_vector_pool[(int) aclass], vec);
1417 /* Finish work with hard register cost vectors. Release allocation
1418 pool for each allocno class. */
1419 static void
1420 finish_cost_vectors (void)
1422 int i;
1423 enum reg_class aclass;
1425 for (i = 0; i < ira_allocno_classes_num; i++)
1427 aclass = ira_allocno_classes[i];
1428 free_alloc_pool (cost_vector_pool[aclass]);
1434 /* The current loop tree node and its regno allocno map. */
1435 ira_loop_tree_node_t ira_curr_loop_tree_node;
1436 ira_allocno_t *ira_curr_regno_allocno_map;
1438 /* This recursive function traverses loop tree with root LOOP_NODE
1439 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1440 correspondingly in preorder and postorder. The function sets up
1441 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1442 basic block nodes of LOOP_NODE is also processed (before its
1443 subloop nodes). */
1444 void
1445 ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
1446 void (*preorder_func) (ira_loop_tree_node_t),
1447 void (*postorder_func) (ira_loop_tree_node_t))
1449 ira_loop_tree_node_t subloop_node;
1451 ira_assert (loop_node->bb == NULL);
1452 ira_curr_loop_tree_node = loop_node;
1453 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1455 if (preorder_func != NULL)
1456 (*preorder_func) (loop_node);
1458 if (bb_p)
1459 for (subloop_node = loop_node->children;
1460 subloop_node != NULL;
1461 subloop_node = subloop_node->next)
1462 if (subloop_node->bb != NULL)
1464 if (preorder_func != NULL)
1465 (*preorder_func) (subloop_node);
1467 if (postorder_func != NULL)
1468 (*postorder_func) (subloop_node);
1471 for (subloop_node = loop_node->subloops;
1472 subloop_node != NULL;
1473 subloop_node = subloop_node->subloop_next)
1475 ira_assert (subloop_node->bb == NULL);
1476 ira_traverse_loop_tree (bb_p, subloop_node,
1477 preorder_func, postorder_func);
1480 ira_curr_loop_tree_node = loop_node;
1481 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1483 if (postorder_func != NULL)
1484 (*postorder_func) (loop_node);
1489 /* The basic block currently being processed. */
1490 static basic_block curr_bb;
1492 /* This recursive function creates allocnos corresponding to
1493 pseudo-registers containing in X. True OUTPUT_P means that X is
1494 a lvalue. */
1495 static void
1496 create_insn_allocnos (rtx x, bool output_p)
1498 int i, j;
1499 const char *fmt;
1500 enum rtx_code code = GET_CODE (x);
1502 if (code == REG)
1504 int regno;
1506 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1508 ira_allocno_t a;
1510 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1511 a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1513 ALLOCNO_NREFS (a)++;
1514 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1515 if (output_p)
1516 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1518 return;
1520 else if (code == SET)
1522 create_insn_allocnos (SET_DEST (x), true);
1523 create_insn_allocnos (SET_SRC (x), false);
1524 return;
1526 else if (code == CLOBBER)
1528 create_insn_allocnos (XEXP (x, 0), true);
1529 return;
1531 else if (code == MEM)
1533 create_insn_allocnos (XEXP (x, 0), false);
1534 return;
1536 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1537 code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1539 create_insn_allocnos (XEXP (x, 0), true);
1540 create_insn_allocnos (XEXP (x, 0), false);
1541 return;
1544 fmt = GET_RTX_FORMAT (code);
1545 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1547 if (fmt[i] == 'e')
1548 create_insn_allocnos (XEXP (x, i), output_p);
1549 else if (fmt[i] == 'E')
1550 for (j = 0; j < XVECLEN (x, i); j++)
1551 create_insn_allocnos (XVECEXP (x, i, j), output_p);
1555 /* Create allocnos corresponding to pseudo-registers living in the
1556 basic block represented by the corresponding loop tree node
1557 BB_NODE. */
1558 static void
1559 create_bb_allocnos (ira_loop_tree_node_t bb_node)
1561 basic_block bb;
1562 rtx insn;
1563 unsigned int i;
1564 bitmap_iterator bi;
1566 curr_bb = bb = bb_node->bb;
1567 ira_assert (bb != NULL);
1568 FOR_BB_INSNS_REVERSE (bb, insn)
1569 if (NONDEBUG_INSN_P (insn))
1570 create_insn_allocnos (PATTERN (insn), false);
1571 /* It might be a allocno living through from one subloop to
1572 another. */
1573 EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
1574 if (ira_curr_regno_allocno_map[i] == NULL)
1575 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1578 /* Create allocnos corresponding to pseudo-registers living on edge E
1579 (a loop entry or exit). Also mark the allocnos as living on the
1580 loop border. */
1581 static void
1582 create_loop_allocnos (edge e)
1584 unsigned int i;
1585 bitmap live_in_regs, border_allocnos;
1586 bitmap_iterator bi;
1587 ira_loop_tree_node_t parent;
1589 live_in_regs = DF_LR_IN (e->dest);
1590 border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1591 EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
1592 FIRST_PSEUDO_REGISTER, i, bi)
1593 if (bitmap_bit_p (live_in_regs, i))
1595 if (ira_curr_regno_allocno_map[i] == NULL)
1597 /* The order of creations is important for right
1598 ira_regno_allocno_map. */
1599 if ((parent = ira_curr_loop_tree_node->parent) != NULL
1600 && parent->regno_allocno_map[i] == NULL)
1601 ira_create_allocno (i, false, parent);
1602 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1604 bitmap_set_bit (border_allocnos,
1605 ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1609 /* Create allocnos corresponding to pseudo-registers living in loop
1610 represented by the corresponding loop tree node LOOP_NODE. This
1611 function is called by ira_traverse_loop_tree. */
1612 static void
1613 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1615 if (loop_node->bb != NULL)
1616 create_bb_allocnos (loop_node);
1617 else if (loop_node != ira_loop_tree_root)
1619 int i;
1620 edge_iterator ei;
1621 edge e;
1622 VEC (edge, heap) *edges;
1624 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1625 if (e->src != loop_node->loop->latch)
1626 create_loop_allocnos (e);
1628 edges = get_loop_exit_edges (loop_node->loop);
1629 FOR_EACH_VEC_ELT (edge, edges, i, e)
1630 create_loop_allocnos (e);
1631 VEC_free (edge, heap, edges);
1635 /* Propagate information about allocnos modified inside the loop given
1636 by its LOOP_TREE_NODE to its parent. */
1637 static void
1638 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1640 if (loop_tree_node == ira_loop_tree_root)
1641 return;
1642 ira_assert (loop_tree_node->bb == NULL);
1643 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1644 loop_tree_node->modified_regnos);
1647 /* Propagate new info about allocno A (see comments about accumulated
1648 info in allocno definition) to the corresponding allocno on upper
1649 loop tree level. So allocnos on upper levels accumulate
1650 information about the corresponding allocnos in nested regions.
1651 The new info means allocno info finally calculated in this
1652 file. */
1653 static void
1654 propagate_allocno_info (void)
1656 int i;
1657 ira_allocno_t a, parent_a;
1658 ira_loop_tree_node_t parent;
1659 enum reg_class aclass;
1661 if (flag_ira_region != IRA_REGION_ALL
1662 && flag_ira_region != IRA_REGION_MIXED)
1663 return;
1664 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1665 for (a = ira_regno_allocno_map[i];
1666 a != NULL;
1667 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1668 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1669 && (parent_a = parent->regno_allocno_map[i]) != NULL
1670 /* There are no caps yet at this point. So use
1671 border_allocnos to find allocnos for the propagation. */
1672 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
1673 ALLOCNO_NUM (a)))
1675 if (! ALLOCNO_BAD_SPILL_P (a))
1676 ALLOCNO_BAD_SPILL_P (parent_a) = false;
1677 ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
1678 ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
1679 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
1680 merge_hard_reg_conflicts (a, parent_a, true);
1681 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
1682 += ALLOCNO_CALLS_CROSSED_NUM (a);
1683 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
1684 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
1685 aclass = ALLOCNO_CLASS (a);
1686 ira_assert (aclass == ALLOCNO_CLASS (parent_a));
1687 ira_allocate_and_accumulate_costs
1688 (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
1689 ALLOCNO_HARD_REG_COSTS (a));
1690 ira_allocate_and_accumulate_costs
1691 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
1692 aclass,
1693 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
1694 ALLOCNO_CLASS_COST (parent_a)
1695 += ALLOCNO_CLASS_COST (a);
1696 ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
1700 /* Create allocnos corresponding to pseudo-registers in the current
1701 function. Traverse the loop tree for this. */
1702 static void
1703 create_allocnos (void)
1705 /* We need to process BB first to correctly link allocnos by member
1706 next_regno_allocno. */
1707 ira_traverse_loop_tree (true, ira_loop_tree_root,
1708 create_loop_tree_node_allocnos, NULL);
1709 if (optimize)
1710 ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
1711 propagate_modified_regnos);
1716 /* The page contains function to remove some regions from a separate
1717 register allocation. We remove regions whose separate allocation
1718 will hardly improve the result. As a result we speed up regional
1719 register allocation. */
1721 /* The function changes the object in range list given by R to OBJ. */
1722 static void
1723 change_object_in_range_list (live_range_t r, ira_object_t obj)
1725 for (; r != NULL; r = r->next)
1726 r->object = obj;
1729 /* Move all live ranges associated with allocno FROM to allocno TO. */
1730 static void
1731 move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
1733 int i;
1734 int n = ALLOCNO_NUM_OBJECTS (from);
1736 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
1738 for (i = 0; i < n; i++)
1740 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
1741 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
1742 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
1744 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
1746 fprintf (ira_dump_file,
1747 " Moving ranges of a%dr%d to a%dr%d: ",
1748 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
1749 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
1750 ira_print_live_range_list (ira_dump_file, lr);
1752 change_object_in_range_list (lr, to_obj);
1753 OBJECT_LIVE_RANGES (to_obj)
1754 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
1755 OBJECT_LIVE_RANGES (from_obj) = NULL;
1759 static void
1760 copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
1762 int i;
1763 int n = ALLOCNO_NUM_OBJECTS (from);
1765 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
1767 for (i = 0; i < n; i++)
1769 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
1770 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
1771 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
1773 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
1775 fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
1776 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
1777 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
1778 ira_print_live_range_list (ira_dump_file, lr);
1780 lr = ira_copy_live_range_list (lr);
1781 change_object_in_range_list (lr, to_obj);
1782 OBJECT_LIVE_RANGES (to_obj)
1783 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
1787 /* Return TRUE if NODE represents a loop with low register
1788 pressure. */
1789 static bool
1790 low_pressure_loop_node_p (ira_loop_tree_node_t node)
1792 int i;
1793 enum reg_class pclass;
1795 if (node->bb != NULL)
1796 return false;
1798 for (i = 0; i < ira_pressure_classes_num; i++)
1800 pclass = ira_pressure_classes[i];
1801 if (node->reg_pressure[pclass] > ira_available_class_regs[pclass]
1802 && ira_available_class_regs[pclass] > 1)
1803 return false;
1805 return true;
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  /* get_loop_exit_edges returns a heap-allocated vector which the
     caller must free; the previous code returned without freeing it,
     leaking it on every call.  */
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edge, edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  VEC_free (edge, heap, edges);
  return res;
}
#endif
1831 /* Sort loops for marking them for removal. We put already marked
1832 loops first, then less frequent loops next, and then outer loops
1833 next. */
1834 static int
1835 loop_compare_func (const void *v1p, const void *v2p)
1837 int diff;
1838 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
1839 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
1841 ira_assert (l1->parent != NULL && l2->parent != NULL);
1842 if (l1->to_remove_p && ! l2->to_remove_p)
1843 return -1;
1844 if (! l1->to_remove_p && l2->to_remove_p)
1845 return 1;
1846 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
1847 return diff;
1848 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
1849 return diff;
1850 /* Make sorting stable. */
1851 return l1->loop->num - l2->loop->num;
1854 /* Mark loops which should be removed from regional allocation. We
1855 remove a loop with low register pressure inside another loop with
1856 register pressure. In this case a separate allocation of the loop
1857 hardly helps (for irregular register file architecture it could
1858 help by choosing a better hard register in the loop but we prefer
1859 faster allocation even in this case). We also remove cheap loops
1860 if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
1861 exit or enter edges are removed too because the allocation might
1862 require put pseudo moves on the EH edges (we could still do this
1863 for pseudos with caller saved hard registers in some cases but it
1864 is impossible to say here or during top-down allocation pass what
1865 hard register the pseudos get finally). */
1866 static void
1867 mark_loops_for_removal (void)
1869 int i, n;
1870 ira_loop_tree_node_t *sorted_loops;
1871 loop_p loop;
1873 sorted_loops
1874 = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
1875 * VEC_length (loop_p,
1876 ira_loops.larray));
1877 for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
1878 if (ira_loop_nodes[i].regno_allocno_map != NULL)
1880 if (ira_loop_nodes[i].parent == NULL)
1882 /* Don't remove the root. */
1883 ira_loop_nodes[i].to_remove_p = false;
1884 continue;
1886 sorted_loops[n++] = &ira_loop_nodes[i];
1887 ira_loop_nodes[i].to_remove_p
1888 = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
1889 && low_pressure_loop_node_p (&ira_loop_nodes[i]))
1890 #ifdef STACK_REGS
1891 || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
1892 #endif
1895 qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
1896 for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
1898 sorted_loops[i]->to_remove_p = true;
1899 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1900 fprintf
1901 (ira_dump_file,
1902 " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
1903 sorted_loops[i]->loop->num, sorted_loops[i]->loop->header->index,
1904 sorted_loops[i]->loop->header->frequency,
1905 loop_depth (sorted_loops[i]->loop),
1906 low_pressure_loop_node_p (sorted_loops[i]->parent)
1907 && low_pressure_loop_node_p (sorted_loops[i])
1908 ? "low pressure" : "cheap loop");
1910 ira_free (sorted_loops);
1913 /* Mark all loops but root for removing. */
1914 static void
1915 mark_all_loops_for_removal (void)
1917 int i;
1918 loop_p loop;
1920 FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
1921 if (ira_loop_nodes[i].regno_allocno_map != NULL)
1923 if (ira_loop_nodes[i].parent == NULL)
1925 /* Don't remove the root. */
1926 ira_loop_nodes[i].to_remove_p = false;
1927 continue;
1929 ira_loop_nodes[i].to_remove_p = true;
1930 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1931 fprintf
1932 (ira_dump_file,
1933 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
1934 ira_loop_nodes[i].loop->num,
1935 ira_loop_nodes[i].loop->header->index,
1936 ira_loop_nodes[i].loop->header->frequency,
1937 loop_depth (ira_loop_nodes[i].loop));
1941 /* Definition of vector of loop tree nodes. */
1942 DEF_VEC_P(ira_loop_tree_node_t);
1943 DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);
1945 /* Vec containing references to all removed loop tree nodes. */
1946 static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;
1948 /* Vec containing references to all children of loop tree nodes. */
1949 static VEC(ira_loop_tree_node_t,heap) *children_vec;
1951 /* Remove subregions of NODE if their separate allocation will not
1952 improve the result. */
1953 static void
1954 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
1956 unsigned int start;
1957 bool remove_p;
1958 ira_loop_tree_node_t subnode;
1960 remove_p = node->to_remove_p;
1961 if (! remove_p)
1962 VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
1963 start = VEC_length (ira_loop_tree_node_t, children_vec);
1964 for (subnode = node->children; subnode != NULL; subnode = subnode->next)
1965 if (subnode->bb == NULL)
1966 remove_uneccesary_loop_nodes_from_loop_tree (subnode);
1967 else
1968 VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
1969 node->children = node->subloops = NULL;
1970 if (remove_p)
1972 VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
1973 return;
1975 while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
1977 subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
1978 subnode->parent = node;
1979 subnode->next = node->children;
1980 node->children = subnode;
1981 if (subnode->bb == NULL)
1983 subnode->subloop_next = node->subloops;
1984 node->subloops = subnode;
1989 /* Return TRUE if NODE is inside PARENT. */
1990 static bool
1991 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
1993 for (node = node->parent; node != NULL; node = node->parent)
1994 if (node == parent)
1995 return true;
1996 return false;
1999 /* Sort allocnos according to their order in regno allocno list. */
2000 static int
2001 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2003 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2004 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2005 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2006 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2008 if (loop_is_inside_p (n1, n2))
2009 return -1;
2010 else if (loop_is_inside_p (n2, n1))
2011 return 1;
2012 /* If allocnos are equally good, sort by allocno numbers, so that
2013 the results of qsort leave nothing to chance. We put allocnos
2014 with higher number first in the list because it is the original
2015 order for allocnos from loops on the same levels. */
2016 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2019 /* This array is used to sort allocnos to restore allocno order in
2020 the regno allocno list. */
2021 static ira_allocno_t *regno_allocnos;
2023 /* Restore allocno order for REGNO in the regno allocno list. */
2024 static void
2025 ira_rebuild_regno_allocno_list (int regno)
2027 int i, n;
2028 ira_allocno_t a;
2030 for (n = 0, a = ira_regno_allocno_map[regno];
2031 a != NULL;
2032 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2033 regno_allocnos[n++] = a;
2034 ira_assert (n > 0);
2035 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2036 regno_allocno_order_compare_func);
2037 for (i = 1; i < n; i++)
2038 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2039 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2040 ira_regno_allocno_map[regno] = regno_allocnos[0];
2041 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2042 fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2045 /* Propagate info from allocno FROM_A to allocno A. */
2046 static void
2047 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2049 enum reg_class aclass;
2051 merge_hard_reg_conflicts (from_a, a, false);
2052 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2053 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2054 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2055 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2056 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2057 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
2058 if (! ALLOCNO_BAD_SPILL_P (from_a))
2059 ALLOCNO_BAD_SPILL_P (a) = false;
2060 aclass = ALLOCNO_CLASS (from_a);
2061 ira_assert (aclass == ALLOCNO_CLASS (a));
2062 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2063 ALLOCNO_HARD_REG_COSTS (from_a));
2064 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2065 aclass,
2066 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2067 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2068 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
/* Remove allocnos from loops removed from the allocation
   consideration.  Each removed allocno is either migrated into the
   nearest surviving ancestor region (when that region has no allocno
   for the same regno yet) or merged into the ancestor's existing
   allocno and then destroyed.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      /* Walk the per-regno allocno list, keeping PREV_A as the last
	 surviving element so removed nodes can be unlinked.  */
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Find the nearest ancestor that either already has an
		 allocno for REGNO or is itself kept.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	{
	  /* We need to restore the order in regno allocno list.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  /* Live-range merging invalidates the start/finish chains.  */
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
/* Remove allocnos from all loops but the root.  Pass 1 moves or
   merges every non-root allocno into the root region; pass 2 checks
   (via assertions) the cap/parent invariants; pass 3 rewires the
   per-regno map to the surviving root allocnos and frees the rest.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  /* No allocno for REGNO at the root yet -- promote this one.  */
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Only propagate cost/conflict info when the immediate parent
	 has no allocno of its own (otherwise it was already merged).  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  /* A is the sole survivor for REGNO: make it the head of the
	     regno chain and fold total conflicts into the base sets.  */
	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	finish_allocno (a);
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  /* Scratch vectors sized for the worst case: one slot per basic
     block plus one per loop.  */
  children_vec
    = VEC_alloc (ira_loop_tree_node_t, heap,
		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
  removed_loop_vec
    = VEC_alloc (ira_loop_tree_node_t, heap,
		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
  /* NOTE: callee name is spelled this way ("uneccesary") at its
     definition elsewhere in this file.  */
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
  VEC_free (ira_loop_tree_node_t, heap, children_vec);
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  /* Free the tree nodes of the loops removed above.  */
  while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
    finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
  VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
}
/* At this point true value of allocno attribute bad_spill_p means
   that there is an insn where allocno occurs and where the allocno
   can not be used as memory.  The function updates the attribute, now
   it can be true only for allocnos which can not be used as memory in
   an insn and in whose live ranges there is other allocno deaths.
   Spilling allocnos with true value will not improve the code because
   it will not make other allocnos colorable and additional reloads
   for the corresponding pseudo will be generated in reload pass for
   each insn it occurs.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  /* Per-class sets of program points where some object of that class
     dies (range finish points).  */
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  /* Pass 1: record every death point per allocno class.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  /* Pass 2: clear bad_spill_p unless some other death occurs strictly
     inside one of the allocno's live ranges.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  /* R != NULL means an interior death was found: spilling A
	     might free a register for another allocno, so keeping
	     bad_spill_p would be wrong.  */
	  if (r != NULL)
	    {
	      ALLOCNO_BAD_SPILL_P (a) = false;
	      break;
	    }
	}
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
/* Set up minimal and maximal live range points for allocnos.  The
   min/max of each object is taken from its own range list and then
   accumulated up the loop tree (through caps or parent allocnos) so
   that an enclosing allocno's span covers its children.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  /* Ranges are ordered: the head holds the largest finish,
	     the tail holds the smallest start.  */
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    /* OBJECT_MAX < 0 means the object never got a range.  */
	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		/* Widen the span of every cap in the cap chain.  */
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    /* Widen the parent allocno's span.  */
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Every object's span must lie inside [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
2399 /* Sort allocnos according to their live ranges. Allocnos with
2400 smaller allocno class are put first unless we use priority
2401 coloring. Allocnos with the same class are ordered according
2402 their start (min). Allocnos with the same start are ordered
2403 according their finish (max). */
2404 static int
2405 object_range_compare_func (const void *v1p, const void *v2p)
2407 int diff;
2408 ira_object_t obj1 = *(const ira_object_t *) v1p;
2409 ira_object_t obj2 = *(const ira_object_t *) v2p;
2410 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2411 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2413 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2414 return diff;
2415 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2416 return diff;
2417 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2420 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2421 static void
2422 sort_conflict_id_map (void)
2424 int i, num;
2425 ira_allocno_t a;
2426 ira_allocno_iterator ai;
2428 num = 0;
2429 FOR_EACH_ALLOCNO (a, ai)
2431 ira_allocno_object_iterator oi;
2432 ira_object_t obj;
2434 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2435 ira_object_id_map[num++] = obj;
2437 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2438 object_range_compare_func);
2439 for (i = 0; i < num; i++)
2441 ira_object_t obj = ira_object_id_map[i];
2443 gcc_assert (obj != NULL);
2444 OBJECT_CONFLICT_ID (obj) = i;
2446 for (i = num; i < ira_objects_num; i++)
2447 ira_object_id_map[i] = NULL;
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  Works on the conflict-id order
   produced by sort_conflict_id_map: a forward sweep tightens each
   object's minimal conflicting id, a backward sweep tightens the
   maximal one.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  /* Forward sweep: FIRST_NOT_FINISHED is the smallest id whose range
     may still overlap the current object's range.  */
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      /* Save the true range start before OBJECT_MIN is repurposed to
	 hold the minimal conflicting id.  */
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  /* Backward sweep: LAST_LIVED[p] is the smallest id (seen so far)
     whose range covers program point P.  */
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go further A range finish to recognize
	 intersection quickly because if the finish of subsequently
	 processed allocno (it has smaller conflict id) range is
	 further A range finish than they are definitely intersected
	 (the reason for this is the allocnos with bigger conflict id
	 have their range starts not smaller than allocnos with
	 smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra conflicts in
     subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
2577 static void
2578 create_caps (void)
2580 ira_allocno_t a;
2581 ira_allocno_iterator ai;
2582 ira_loop_tree_node_t loop_tree_node;
2584 FOR_EACH_ALLOCNO (a, ai)
2586 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2587 continue;
2588 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2589 create_cap_allocno (a);
2590 else if (ALLOCNO_CAP (a) == NULL)
2592 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2593 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2594 create_cap_allocno (a);
/* The page contains code transforming the internal representation
   (IR) with more than one region into a one-region IR, which is
   necessary for reload.  This transformation is called IR flattening.
   We might just rebuild the IR for one region but we don't do it
   because it takes a lot of time.  */
2607 /* Map: regno -> allocnos which will finally represent the regno for
2608 IR with one region. */
2609 static ira_allocno_t *regno_top_level_allocno_map;
2611 /* Find the allocno that corresponds to A at a level one higher up in the
2612 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2613 ira_allocno_t
2614 ira_parent_allocno (ira_allocno_t a)
2616 ira_loop_tree_node_t parent;
2618 if (ALLOCNO_CAP (a) != NULL)
2619 return NULL;
2621 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2622 if (parent == NULL)
2623 return NULL;
2625 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2628 /* Find the allocno that corresponds to A at a level one higher up in the
2629 loop tree. If ALLOCNO_CAP is set for A, return that. */
2630 ira_allocno_t
2631 ira_parent_or_cap_allocno (ira_allocno_t a)
2633 if (ALLOCNO_CAP (a) != NULL)
2634 return ALLOCNO_CAP (a);
2636 return ira_parent_allocno (a);
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up the loop tree looking for a surviving ancestor that
	 is the destination of a removed store.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Accumulate A's ranges, conflicts, and call info into the
	 surviving destination allocno.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  /* Seed each object's total conflict set from its base set; the loop
     over regions below will accumulate into the totals.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they are never
	   will be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      /* Top-level allocno: it survives flattening as the
		 representative of its pseudo.  */
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      /* Same pseudo as parent: fold A into the parent.  */
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  /* A got a new pseudo inside the region: subtract its
	     accumulated statistics from every ancestor, since the
	     ancestors no longer represent it.  */
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      /* Sweep the program points, recording conflicts between
	 simultaneously live surviving objects.  */
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* It copy generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  /* Retarget the copy at the surviving top-level allocnos.  */
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      ira_add_allocno_copy_to_list (cp);
      ira_swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      /* Every allocno must be registered in its node's set.  */
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* A cap must itself be capped or live at the root.  */
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	/* An uncapped allocno must have a parent allocno and cross
	   the region border.  */
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
/* Identify allocnos which prefer a register class with a single hard register.
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
   less likely to use the preferred singleton register.  */
static void
update_conflict_hard_reg_costs (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int i, index, min;

  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_class_t aclass = ALLOCNO_CLASS (a);
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));

      /* Only singleton preferred classes matter here.  */
      if (reg_class_size[(int) pref] != 1)
	continue;
      /* Position of the singleton register within ACLASS's hard
	 register list; < 0 means it is not in ACLASS.  */
      index = ira_class_hard_reg_index[(int) aclass]
	      [ira_class_hard_regs[(int) pref][0]];
      if (index < 0)
	continue;
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
	continue;
      /* MIN is the cheapest cost among registers costlier than the
	 class cost -- i.e. the penalty A pays if denied its
	 preferred register.  */
      min = INT_MAX;
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
      if (min == INT_MAX)
	continue;
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				  aclass, 0);
      /* Make the singleton register look cheaper to keep free for A,
	 i.e. pricier for A's conflicts.  */
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
	-= min - ALLOCNO_CLASS_COST (a);
    }
}
/* Create a internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  If LOOPS_P is FALSE the nodes corresponding to
   the loops (except the root which corresponds the all function) and
   correspondingly allocnos for the loops will be not created.  Such
   parameter value is used for Chaitin-Briggs coloring.  The function
   returns TRUE if we generate loop structure (besides nodes
   representing all function and the basic blocks) for regional
   allocation.  A true return means that we really need to flatten IR
   before the reload.  */
bool
ira_build (bool loops_p)
{
  df_analyze ();

  /* The pass order below is significant: costs need allocnos, region
     removal needs costs/pressure, conflict ids need live ranges.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_copies ();
  create_loop_tree_nodes (loops_p);
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  /* Recompute: region removal may have left only the root.  */
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      /* Gather summary statistics for the dump.  */
      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       VEC_length (loop_p, ira_loops.larray), n_basic_blocks,
	       ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  The teardown order
   mirrors the build dependencies and must be preserved.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}