gcc/ChangeLog
[official-gcc.git] / gcc / ira-build.c
blob0e2fd0c551073ba52205830698a5c752ebb7a98c
1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2013 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "hard-reg-set.h"
31 #include "basic-block.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "diagnostic-core.h"
35 #include "params.h"
36 #include "df.h"
37 #include "reload.h"
38 #include "sparseset.h"
39 #include "ira-int.h"
40 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
/* Forward declaration; defined later in this file.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
				     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
97 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
98 the member loop_num. */
99 static void
100 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
102 int max_regno = max_reg_num ();
104 node->regno_allocno_map
105 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
106 memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
107 memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
108 node->all_allocnos = ira_allocate_bitmap ();
109 node->modified_regnos = ira_allocate_bitmap ();
110 node->border_allocnos = ira_allocate_bitmap ();
111 node->local_copies = ira_allocate_bitmap ();
112 node->loop_num = loop_num;
113 node->children = NULL;
114 node->subloops = NULL;
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds the all function) will be not allocated
   but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
  last_basic_block_before_change = last_basic_block;
  /* BB nodes start out empty; they are filled lazily in
     form_loop_tree.  */
  for (i = 0; i < (unsigned int) last_basic_block; i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loops: only the root region (node 0) exists.  */
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  /* Skip loops entered or exited through an abnormal critical
	     edge: we could not emit shuffle insns on such edges.  A
	     NULL regno_allocno_map marks the node as unused.  */
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  edges.release ();
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
188 /* The function returns TRUE if there are more one allocation
189 region. */
190 static bool
191 more_one_region_p (void)
193 unsigned int i;
194 loop_p loop;
196 if (current_loops != NULL)
197 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
198 if (ira_loop_nodes[i].regno_allocno_map != NULL
199 && ira_loop_tree_root != &ira_loop_nodes[i])
200 return true;
201 return false;
204 /* Free the loop tree node of a loop. */
205 static void
206 finish_loop_tree_node (ira_loop_tree_node_t loop)
208 if (loop->regno_allocno_map != NULL)
210 ira_assert (loop->bb == NULL);
211 ira_free_bitmap (loop->local_copies);
212 ira_free_bitmap (loop->border_allocnos);
213 ira_free_bitmap (loop->modified_regnos);
214 ira_free_bitmap (loop->all_allocnos);
215 ira_free (loop->regno_allocno_map);
216 loop->regno_allocno_map = NULL;
220 /* Free the loop tree nodes. */
221 static void
222 finish_loop_tree_nodes (void)
224 unsigned int i;
226 for (i = 0; i < ira_loop_nodes_count; i++)
227 finish_loop_tree_node (&ira_loop_nodes[i]);
228 ira_free (ira_loop_nodes);
229 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
231 if (ira_bb_nodes[i].local_copies != NULL)
232 ira_free_bitmap (ira_bb_nodes[i].local_copies);
233 if (ira_bb_nodes[i].border_allocnos != NULL)
234 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
235 if (ira_bb_nodes[i].modified_regnos != NULL)
236 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
237 if (ira_bb_nodes[i].all_allocnos != NULL)
238 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
239 if (ira_bb_nodes[i].regno_allocno_map != NULL)
240 ira_free (ira_bb_nodes[i].regno_allocno_map);
242 ira_free (ira_bb_nodes);
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we adding
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  /* Find the closest enclosing loop that has an initialized
	     node (skipped loops have a NULL map).  */
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  /* Root node: no siblings, no parent.  */
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  /* Push onto the parent's children and subloops chains.  */
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
299 /* The following recursive function sets up levels of nodes of the
300 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
301 The function returns maximal value of level in the tree + 1. */
302 static int
303 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
305 int height, max_height;
306 ira_loop_tree_node_t subloop_node;
308 ira_assert (loop_node->bb == NULL);
309 loop_node->level = level;
310 max_height = level + 1;
311 for (subloop_node = loop_node->subloops;
312 subloop_node != NULL;
313 subloop_node = subloop_node->subloop_next)
315 ira_assert (subloop_node->bb == NULL);
316 height = setup_loop_tree_level (subloop_node, level + 1);
317 if (height > max_height)
318 max_height = height;
320 return max_height;
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB (bb)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  /* Find the innermost enclosing loop whose node was actually
	     initialized (skipped loops have a NULL map).  */
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      /* Make sure the loop (or the root, if PARENT is NULL) is linked
	 into the tree, then chain this BB as its child.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  Called after the register count may have grown.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Reallocate every initialized loop node's map for the new
     register count.  */
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  /* Re-link every allocno into the per-regno chains.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
424 /* Initialize data concerning allocnos. */
425 static void
426 initiate_allocnos (void)
428 live_range_pool
429 = create_alloc_pool ("live ranges",
430 sizeof (struct live_range), 100);
431 allocno_pool
432 = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
433 object_pool
434 = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
435 allocno_vec.create (max_reg_num () * 2);
436 ira_allocnos = NULL;
437 ira_allocnos_num = 0;
438 ira_objects_num = 0;
439 ira_object_id_map_vec.create (max_reg_num () * 2);
440 ira_object_id_map = NULL;
441 ira_regno_allocno_map
442 = (ira_allocno_t *) ira_allocate (max_reg_num ()
443 * sizeof (ira_allocno_t));
444 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index of the object within the allocno.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  /* Hard registers outside the allocno's class can never be used, so
     record them as conflicting from the start.  */
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  /* Empty (inverted) live-range bounds until program points are
     processed.  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Link the allocno into the per-regno chain; caps stay out of
	 the regno maps.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  /* Objects are created later, once allocno classes are known (see
     ira_create_allocno_objects).  */
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      /* Registers outside the new class can never be assigned, so
	 treat them as conflicting in both sets.  */
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
    }
}
557 /* Determine the number of objects we should associate with allocno A
558 and allocate them. */
559 void
560 ira_create_allocno_objects (ira_allocno_t a)
562 enum machine_mode mode = ALLOCNO_MODE (a);
563 enum reg_class aclass = ALLOCNO_CLASS (a);
564 int n = ira_reg_class_max_nregs[aclass][mode];
565 int i;
567 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
568 n = 1;
570 ALLOCNO_NUM_OBJECTS (a) = n;
571 for (i = 0; i < n; i++)
572 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
575 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
576 ALLOCNO_OBJECT structures. This must be called after the allocno
577 classes are known. */
578 static void
579 create_allocno_objects (void)
581 ira_allocno_t a;
582 ira_allocno_iterator ai;
584 FOR_EACH_ALLOCNO (a, ai)
585 ira_create_allocno_objects (a);
/* Merge hard register conflict information for all objects associated
   with allocno FROM into the corresponding objects associated with
   TO.  If TOTAL_ONLY is true, we only merge
   OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
			  OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
			OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  /* The stack-reg prohibition flags live on the allocno, not the
     objects; merge them the same way.  */
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
616 /* Update hard register conflict information for all objects associated with
617 A to include the regs in SET. */
618 void
619 ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
621 ira_allocno_object_iterator i;
622 ira_object_t obj;
624 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
626 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
627 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
631 /* Return TRUE if a conflict vector with NUM elements is more
632 profitable than a conflict bit vector for OBJ. */
633 bool
634 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
636 int nw;
637 int max = OBJECT_MAX (obj);
638 int min = OBJECT_MIN (obj);
640 if (max < min)
641 /* We prefer a bit vector in such case because it does not result
642 in allocation. */
643 return false;
645 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
646 return (2 * sizeof (ira_object_t) * (num + 1)
647 < 3 * nw * sizeof (IRA_INT_TYPE));
650 /* Allocates and initialize the conflict vector of OBJ for NUM
651 conflicting objects. */
652 void
653 ira_allocate_conflict_vec (ira_object_t obj, int num)
655 int size;
656 ira_object_t *vec;
658 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
659 num++; /* for NULL end marker */
660 size = sizeof (ira_object_t) * num;
661 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
662 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
663 vec[0] = NULL;
664 OBJECT_NUM_CONFLICTS (obj) = 0;
665 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
666 OBJECT_CONFLICT_VEC_P (obj) = true;
669 /* Allocate and initialize the conflict bit vector of OBJ. */
670 static void
671 allocate_conflict_bit_vec (ira_object_t obj)
673 unsigned int size;
675 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
676 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
677 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
678 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
679 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
680 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
681 OBJECT_CONFLICT_VEC_P (obj) = false;
684 /* Allocate and initialize the conflict vector or conflict bit vector
685 of OBJ for NUM conflicting allocnos whatever is more profitable. */
686 void
687 ira_allocate_object_conflicts (ira_object_t obj, int num)
689 if (ira_conflict_vector_profitable_p (obj, num))
690 ira_allocate_conflict_vec (obj, num);
691 else
692 allocate_conflict_bit_vec (obj);
695 /* Add OBJ2 to the conflicts of OBJ1. */
696 static void
697 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
699 int num;
700 unsigned int size;
702 if (OBJECT_CONFLICT_VEC_P (obj1))
704 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
705 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
706 num = curr_num + 2;
707 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
709 ira_object_t *newvec;
710 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
711 newvec = (ira_object_t *) ira_allocate (size);
712 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
713 ira_free (vec);
714 vec = newvec;
715 OBJECT_CONFLICT_ARRAY (obj1) = vec;
716 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
718 vec[num - 2] = obj2;
719 vec[num - 1] = NULL;
720 OBJECT_NUM_CONFLICTS (obj1)++;
722 else
724 int nw, added_head_nw, id;
725 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
727 id = OBJECT_CONFLICT_ID (obj2);
728 if (OBJECT_MIN (obj1) > id)
730 /* Expand head of the bit vector. */
731 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
732 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
733 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
734 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
736 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
737 vec, nw * sizeof (IRA_INT_TYPE));
738 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
740 else
742 size
743 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
744 vec = (IRA_INT_TYPE *) ira_allocate (size);
745 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
746 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
747 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
748 memset ((char *) vec
749 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
750 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
751 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
752 OBJECT_CONFLICT_ARRAY (obj1) = vec;
753 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
755 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
757 else if (OBJECT_MAX (obj1) < id)
759 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
760 size = nw * sizeof (IRA_INT_TYPE);
761 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
763 /* Expand tail of the bit vector. */
764 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
765 vec = (IRA_INT_TYPE *) ira_allocate (size);
766 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
767 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
768 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
769 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
770 OBJECT_CONFLICT_ARRAY (obj1) = vec;
771 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
773 OBJECT_MAX (obj1) = id;
775 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
/* Add OBJ1 to the conflicts of OBJ2 and vice versa, keeping the
   conflict relation symmetric.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
787 /* Clear all conflicts of OBJ. */
788 static void
789 clear_conflicts (ira_object_t obj)
791 if (OBJECT_CONFLICT_VEC_P (obj))
793 OBJECT_NUM_CONFLICTS (obj) = 0;
794 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
796 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
798 int nw;
800 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
801 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
/* The array used to find duplications in conflict vectors of
   allocnos, indexed by conflict id.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno (bumped once per vector, avoiding a reset of
   CONFLICT_CHECK between vectors).  */
static int curr_conflict_check_tick;
813 /* Remove duplications in conflict vector of OBJ. */
814 static void
815 compress_conflict_vec (ira_object_t obj)
817 ira_object_t *vec, conflict_obj;
818 int i, j;
820 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
821 vec = OBJECT_CONFLICT_VEC (obj);
822 curr_conflict_check_tick++;
823 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
825 int id = OBJECT_CONFLICT_ID (conflict_obj);
826 if (conflict_check[id] != curr_conflict_check_tick)
828 conflict_check[id] = curr_conflict_check_tick;
829 vec[j++] = conflict_obj;
832 OBJECT_NUM_CONFLICTS (obj) = j;
833 vec[j] = NULL;
836 /* Remove duplications in conflict vectors of all allocnos. */
837 static void
838 compress_conflict_vecs (void)
840 ira_object_t obj;
841 ira_object_iterator oi;
843 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
844 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
845 curr_conflict_check_tick = 0;
846 FOR_EACH_OBJECT (obj, oi)
848 if (OBJECT_CONFLICT_VEC_P (obj))
849 compress_conflict_vec (obj);
851 ira_free (conflict_check);
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  /* Identify the region: a basic block or a loop.  */
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  /* CAP_P is true: caps are kept out of the regno maps.  */
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  /* Copy the cost and frequency data from the member allocno.  */
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
916 /* Create and return a live range for OBJECT with given attributes. */
917 live_range_t
918 ira_create_live_range (ira_object_t obj, int start, int finish,
919 live_range_t next)
921 live_range_t p;
923 p = (live_range_t) pool_alloc (live_range_pool);
924 p->object = obj;
925 p->start = start;
926 p->finish = finish;
927 p->next = next;
928 return p;
931 /* Create a new live range for OBJECT and queue it at the head of its
932 live range list. */
933 void
934 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
936 live_range_t p;
937 p = ira_create_live_range (object, start, finish,
938 OBJECT_LIVE_RANGES (object));
939 OBJECT_LIVE_RANGES (object) = p;
942 /* Copy allocno live range R and return the result. */
943 static live_range_t
944 copy_live_range (live_range_t r)
946 live_range_t p;
948 p = (live_range_t) pool_alloc (live_range_pool);
949 *p = *r;
950 return p;
953 /* Copy allocno live range list given by its head R and return the
954 result. */
955 live_range_t
956 ira_copy_live_range_list (live_range_t r)
958 live_range_t p, first, last;
960 if (r == NULL)
961 return NULL;
962 for (first = last = NULL; r != NULL; r = r->next)
964 p = copy_live_range (r);
965 if (first == NULL)
966 first = p;
967 else
968 last->next = p;
969 last = p;
971 return first;
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  Both input lists are consumed: nodes are reused or
   freed.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant r1->start >= r2->start by swapping.  */
      if (r1->start < r2->start)
	{
	  temp = r1;
	  r1 = r2;
	  r2 = temp;
	}
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  /* Append whatever remains of either list.  */
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1052 /* Return TRUE if live ranges R1 and R2 intersect. */
1053 bool
1054 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1056 /* Remember the live ranges are always kept ordered. */
1057 while (r1 != NULL && r2 != NULL)
1059 if (r1->start > r2->finish)
1060 r1 = r1->next;
1061 else if (r2->start > r1->finish)
1062 r2 = r2->next;
1063 else
1064 return true;
1066 return false;
/* Free allocno live range R back to the live range pool.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1076 /* Free list of allocno live ranges starting with R. */
1077 void
1078 ira_finish_live_range_list (live_range_t r)
1080 live_range_t next_r;
1082 for (; r != NULL; r = next_r)
1084 next_r = r->next;
1085 ira_finish_live_range (r);
/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  /* Cost vectors are sized by allocno class, so the class is needed
     to return them to the matching pool.  */
  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  /* Null the fields so a repeated call (or a later full free) sees a
     consistent state.  */
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  Also releases A's objects (their live ranges and conflict
   arrays) and clears A's slots in the global object/allocno maps.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  /* Each of the four cost vectors may or may not have been
     allocated; free only what exists, then null all fields.  */
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A: first the cost vectors,
   objects and map entries, then the allocno structure itself.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
/* Free the memory allocated for all allocnos.  Releases every
   allocno, the regno->allocno map, the container vectors, and the
   underlying allocation pools.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  /* The pools are freed last: the loops above return individual
     elements to them.  */
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
/* Pool for allocno copy structures.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;
/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  /* Heuristic initial capacity proportional to the number of insn
     uids in the function.  */
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  Return NULL if no such copy exists.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  /* Each copy is threaded on two lists, one per allocno it connects;
     follow whichever next link corresponds to A1's role (first or
     second) in the current copy.  */
  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  The new copy is appended to
   the global copy array; it is NOT yet linked onto the allocnos' copy
   lists (see ira_add_allocno_copy_to_list).  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		 bool constraint_p, rtx insn,
		 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  /* Keep the exported array/length view of the vector in sync; the
     push may have reallocated the storage.  */
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}
/* Attach a copy CP to allocnos involved into the copy.  CP becomes
   the new head of both allocnos' copy lists.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      /* Fix the back link of the old list head; which prev field to
	 update depends on the role FIRST plays in that copy.  */
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      /* Likewise for SECOND's old list head.  */
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1265 /* Make a copy CP a canonical copy where number of the
1266 first allocno is less than the second one. */
1267 void
1268 ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1270 ira_allocno_t temp;
1271 ira_copy_t temp_cp;
1273 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1274 return;
1276 temp = cp->first;
1277 cp->first = cp->second;
1278 cp->second = temp;
1280 temp_cp = cp->prev_first_allocno_copy;
1281 cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
1282 cp->prev_second_allocno_copy = temp_cp;
1284 temp_cp = cp->next_first_allocno_copy;
1285 cp->next_first_allocno_copy = cp->next_second_allocno_copy;
1286 cp->next_second_allocno_copy = temp_cp;
1289 /* Create (or update frequency if the copy already exists) and return
1290 the copy of allocnos FIRST and SECOND with frequency FREQ
1291 corresponding to move insn INSN (if any) and originated from
1292 LOOP_TREE_NODE. */
1293 ira_copy_t
1294 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1295 bool constraint_p, rtx insn,
1296 ira_loop_tree_node_t loop_tree_node)
1298 ira_copy_t cp;
1300 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1302 cp->freq += freq;
1303 return cp;
1305 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1306 loop_tree_node);
1307 ira_assert (first != NULL && second != NULL);
1308 ira_add_allocno_copy_to_list (cp);
1309 ira_swap_allocno_copy_ends_if_necessary (cp);
1310 return cp;
/* Print info about copy CP into file F.  The tag after '@freq'
   distinguishes how the copy originated: from a real move insn, from
   an insn constraint, or from a register shuffle.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
/* Print info about copy REF to stderr.  For use from the
   debugger.  */
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}
1330 DEBUG_FUNCTION void
1331 debug (ira_allocno_copy *ptr)
1333 if (ptr)
1334 debug (*ptr);
1335 else
1336 fprintf (stderr, "<nil>\n");
/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies into file F, one copy per line.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F: the
   allocno itself followed by every copy on its copy list.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  /* Follow whichever next link corresponds to A's role (first or
     second) in each copy.  */
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
/* Print the copies involving allocno REF to stderr.  For use from
   the debugger.  */
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}
1398 DEBUG_FUNCTION void
1399 debug (ira_allocno *ptr)
1401 if (ptr)
1402 debug (*ptr);
1403 else
1404 fprintf (stderr, "<nil>\n");
/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP by returning it to
   the copy pool.  No unlinking from the allocnos' copy lists is done
   here.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
/* Free memory allocated for all copies: each copy, the container
   vector, and the underlying pool.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  free_alloc_pool (copy_pool);
}
/* Pools for cost vectors, indexed by register class.  Only the
   entries for allocno classes are initialized (see
   initiate_cost_vectors).  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class.  Each pool element
   is an int per hard register of the class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass]
	= create_alloc_pool ("cost vectors",
			     sizeof (int) * ira_class_hard_regs_num[aclass],
			     100);
    }
}
/* Allocate and return a cost vector for ACLASS.  The vector is
   uninitialized and has one int slot per hard register of the
   class.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
/* Free a cost vector VEC for ACLASS.  VEC must have been obtained
   from ira_allocate_cost_vector with the same class.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
1474 /* Finish work with hard register cost vectors. Release allocation
1475 pool for each allocno class. */
1476 static void
1477 finish_cost_vectors (void)
1479 int i;
1480 enum reg_class aclass;
1482 for (i = 0; i < ira_allocno_classes_num; i++)
1484 aclass = ira_allocno_classes[i];
1485 free_alloc_pool (cost_vector_pool[aclass]);
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbours in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
	 the flag to mark blocks we still have to visit to add them to
	 our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      /* Mark every block of the loop body as still-to-visit.  */
      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      /* Iterative DFS over the reverse CFG (following predecessor
	 edges), seeded from each not-yet-visited block in reverse
	 pre-order.  */
      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR)
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      /* If no unvisited predecessor was pushed, N is finished:
		 pop it and emit it in post-order.  */
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
      dfs_stack.release ();
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
/* The current loop tree node and its regno allocno map, maintained
   by ira_traverse_loop_tree for the benefit of its callbacks.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE is also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */

void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      vec<ira_loop_tree_node_t>
	loop_preorder = vNULL;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
	 is set up such that nodes in the loop body appear in a pre-order
	 of their place in the CFG.  */
      for (subloop_node = loop_node->children;
	   subloop_node != NULL;
	   subloop_node = subloop_node->next)
	if (subloop_node->bb != NULL)
	  loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	  (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
	{
	  vec<ira_loop_tree_node_t> loop_rev_postorder =
	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
	    (*postorder_func) (subloop_node);
	  loop_rev_postorder.release ();
	}

      loop_preorder.release ();
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* The recursion above clobbered the current-node globals; restore
     them before the postorder callback runs for this node.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
/* The basic block currently being processed by create_insn_allocnos,
   set by create_bb_allocnos.  */
static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to
   pseudo-registers containing in X.  True OUTPUT_P means that X is
   an lvalue.  */
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      /* Only pseudo registers get allocnos; hard registers are
	 handled elsewhere.  */
      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      /* The address of a MEM is always read, even in a store.  */
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      /* Auto-modify addressing both reads and writes the register.  */
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  /* Generic case: recurse into all rtx and rtvec operands.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  /* A pseudo lives on the edge iff it is live out of the source and
     live into the destination.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      /* Loop entries: all header predecessors except the back edge
	 from the latch.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      /* Loop exits.  */
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
	create_loop_allocnos (e);
      edges.release ();
    }
}
1807 /* Propagate information about allocnos modified inside the loop given
1808 by its LOOP_TREE_NODE to its parent. */
1809 static void
1810 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1812 if (loop_tree_node == ira_loop_tree_root)
1813 return;
1814 ira_assert (loop_tree_node->bb == NULL);
1815 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1816 loop_tree_node->modified_regnos);
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* Propagation only makes sense with regional allocation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_CLASS_COST (parent_a)
	    += ALLOCNO_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  /* The modified-regnos propagation is only needed for regional
     allocation, which is not done at -O0.  */
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
1890 /* The page contains function to remove some regions from a separate
1891 register allocation. We remove regions whose separate allocation
1892 will hardly improve the result. As a result we speed up regional
1893 register allocation. */
1895 /* The function changes the object in range list given by R to OBJ. */
1896 static void
1897 change_object_in_range_list (live_range_t r, ira_object_t obj)
1899 for (; r != NULL; r = r->next)
1900 r->object = obj;
/* Move all live ranges associated with allocno FROM to allocno TO.
   The ranges are retargeted to TO's objects, merged into TO's
   existing range lists, and detached from FROM.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   "      Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      /* The ranges now belong to TO_OBJ; clear FROM_OBJ so they are
	 not freed twice.  */
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.
   Unlike move_allocno_live_ranges, FROM keeps its own ranges; TO
   receives fresh copies merged into its existing range lists.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
/* Return TRUE if NODE represents a loop with low register
   pressure for every pressure class.  Basic-block nodes are never
   considered low pressure.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      /* Single-register classes are ignored: exceeding their size is
	 unavoidable and not a sign of real pressure.  */
      if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
	  && ira_class_hard_regs_num[pclass] > 1)
	return false;
    }
  return true;
}
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  /* Any EH edge into the loop header disqualifies the loop.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  /* Likewise any complex exit edge.  */
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif
/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  qsort comparison function; both arguments point to
   ira_loop_tree_node_t.  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  /* The root is never sorted (see mark_loops_for_removal), so both
     nodes must have parents.  */
  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop_num - l2->loop_num;
}
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architecture it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
   exit or enter edges are removed too because the allocation might
   require put pseudo moves on the EH edges (we could still do this
   for pseudos with caller saved hard registers in some cases but it
   is impossible to say here or during top-down allocation pass what
   hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  /* Collect the removable candidates (N counts them) and compute the
     initial removal mark for each.  */
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  /* Sort so the cheapest loops come first, then mark loops for
     removal until at most IRA_MAX_LOOPS_NUM regions remain ("+ 1"
     accounts for the root region, which is not in SORTED_LOOPS).  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
/* Mark all loops but root for removing.  Used when regional
   allocation is disabled and only the whole-function region should
   remain.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.
   Used as a temporary work stack by
   remove_uneccesary_loop_nodes_from_loop_tree.  */
static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  Removed nodes are recorded in
   REMOVED_LOOP_VEC; surviving children are re-linked (via
   CHILDREN_VEC) under the nearest surviving ancestor.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  /* Everything pushed past START belongs to NODE (or to a removed
     descendant and is being hoisted into NODE).  */
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* NODE itself goes away; its surviving children stay on
	 CHILDREN_VEC for the caller to adopt.  */
      removed_loop_vec.safe_push (node);
      return;
    }
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
2168 /* Return TRUE if NODE is inside PARENT.  */
/* The walk starts from NODE's parent, so a node is not considered to
   be inside itself.  */
2169 static bool
2170 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2172 for (node = node->parent; node != NULL; node = node->parent)
2173 if (node == parent)
2174 return true;
2175 return false;
2178 /* Sort allocnos according to their order in regno allocno list.  */
/* qsort comparator: an allocno from an inner loop sorts before one
   from an enclosing loop.  */
2179 static int
2180 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2182 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2183 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2184 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2185 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2187 if (loop_is_inside_p (n1, n2))
2188 return -1;
2189 else if (loop_is_inside_p (n2, n1))
2190 return 1;
2191 /* If allocnos are equally good, sort by allocno numbers, so that
2192    the results of qsort leave nothing to chance.  We put allocnos
2193    with higher number first in the list because it is the original
2194    order for allocnos from loops on the same levels.  */
2195 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2198 /* This array is used to sort allocnos to restore allocno order in
2199    the regno allocno list.  */
/* Lazily allocated in remove_unnecessary_allocnos and released there;
   sized to hold ira_allocnos_num entries.  */
2200 static ira_allocno_t *regno_allocnos;
2202 /* Restore allocno order for REGNO in the regno allocno list.  */
/* Collects the list into the scratch array regno_allocnos, sorts it by
   loop-tree order, and relinks the singly-linked regno allocno list
   (ALLOCNO_NEXT_REGNO_ALLOCNO chain) in that order.  */
2203 static void
2204 ira_rebuild_regno_allocno_list (int regno)
2206 int i, n;
2207 ira_allocno_t a;
2209 for (n = 0, a = ira_regno_allocno_map[regno];
2210 a != NULL;
2211 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2212 regno_allocnos[n++] = a;
2213 ira_assert (n > 0);
2214 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2215 regno_allocno_order_compare_func);
2216 for (i = 1; i < n; i++)
2217 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2218 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2219 ira_regno_allocno_map[regno] = regno_allocnos[0];
2220 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2221 fprintf (ira_dump_file, "  Rebuilding regno allocno list for %d\n", regno);
2224 /* Propagate info from allocno FROM_A to allocno A.  */
/* Accumulates conflict sets, reference/frequency/call statistics and
   cost vectors of FROM_A into A.  Both allocnos must have the same
   allocno class.  */
2225 static void
2226 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2228 enum reg_class aclass;
2230 merge_hard_reg_conflicts (from_a, a, false);
2231 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2232 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2233 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2234 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2235 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2236 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
2237 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2238 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
/* bad_spill_p is sticky-false: if either allocno may profitably be
   spilled, the merged one may be too.  */
2239 if (! ALLOCNO_BAD_SPILL_P (from_a))
2240 ALLOCNO_BAD_SPILL_P (a) = false;
2241 aclass = ALLOCNO_CLASS (from_a);
2242 ira_assert (aclass == ALLOCNO_CLASS (a));
2243 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2244 ALLOCNO_HARD_REG_COSTS (from_a));
2245 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2246 aclass,
2247 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2248 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2249 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2252 /* Remove allocnos from loops removed from the allocation
2253    consideration.  */
/* For each pseudo, walk its regno allocno list.  An allocno living in
   a to-be-removed loop node is either (a) moved up to the nearest
   surviving ancestor node if that node has no allocno for the regno
   yet, or (b) merged into the ancestor's existing allocno and freed.  */
2254 static void
2255 remove_unnecessary_allocnos (void)
2257 int regno;
2258 bool merged_p, rebuild_p;
2259 ira_allocno_t a, prev_a, next_a, parent_a;
2260 ira_loop_tree_node_t a_node, parent;
2262 merged_p = false;
2263 regno_allocnos = NULL;
2264 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
2266 rebuild_p = false;
2267 for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2268 a != NULL;
2269 a = next_a)
2271 next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2272 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2273 if (! a_node->to_remove_p)
2274 prev_a = a;
2275 else
/* Find the nearest ancestor that either survives or already has
   an allocno for this regno.  */
2277 for (parent = a_node->parent;
2278 (parent_a = parent->regno_allocno_map[regno]) == NULL
2279 && parent->to_remove_p;
2280 parent = parent->parent)
2282 if (parent_a == NULL)
2284 /* There are no allocnos with the same regno in
2285    upper region -- just move the allocno to the
2286    upper region.  */
2287 prev_a = a;
2288 ALLOCNO_LOOP_TREE_NODE (a) = parent;
2289 parent->regno_allocno_map[regno] = a;
2290 bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
2291 rebuild_p = true;
2293 else
2295 /* Remove the allocno and update info of allocno in
2296    the upper region.  */
2297 if (prev_a == NULL)
2298 ira_regno_allocno_map[regno] = next_a;
2299 else
2300 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
2301 move_allocno_live_ranges (a, parent_a);
2302 merged_p = true;
2303 propagate_some_info_from_allocno (parent_a, a);
2304 /* Remove it from the corresponding regno allocno
2305    map to avoid info propagation of subsequent
2306    allocno into this already removed allocno.  */
2307 a_node->regno_allocno_map[regno] = NULL;
2308 finish_allocno (a);
2312 if (rebuild_p)
2313 /* We need to restore the order in regno allocno list.  */
/* The scratch array is allocated only on first need.  */
2315 if (regno_allocnos == NULL)
2316 regno_allocnos
2317 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2318 * ira_allocnos_num)
2319 ira_rebuild_regno_allocno_list (regno);
2322 if (merged_p)
2323 ira_rebuild_start_finish_chains ();
2324 if (regno_allocnos != NULL)
2325 ira_free (regno_allocnos);
2328 /* Remove allocnos from all loops but the root.  */
/* Three passes over all allocnos: (1) move or merge each non-root
   allocno into the root node's allocno for its regno, (2) checking
   pass asserting cap/parent invariants, (3) keep only the allocnos
   now representing their regno at the root and free the rest.  */
2329 static void
2330 remove_low_level_allocnos (void)
2332 int regno;
2333 bool merged_p, propagate_p;
2334 ira_allocno_t a, top_a;
2335 ira_loop_tree_node_t a_node, parent;
2336 ira_allocno_iterator ai;
2338 merged_p = false;
2339 FOR_EACH_ALLOCNO (a, ai)
2341 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2342 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2343 continue;
2344 regno = ALLOCNO_REGNO (a);
2345 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
/* No allocno for this regno at the root yet -- promote A.  */
2347 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2348 ira_loop_tree_root->regno_allocno_map[regno] = a;
2349 continue;
/* Propagate info only from the outermost removed allocno for the
   regno to avoid accumulating the same info several times.  */
2351 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2352 /* Remove the allocno and update info of allocno in the upper
2353    region.  */
2354 move_allocno_live_ranges (a, top_a);
2355 merged_p = true;
2356 if (propagate_p)
2357 propagate_some_info_from_allocno (top_a, a);
2359 FOR_EACH_ALLOCNO (a, ai)
2361 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2362 if (a_node == ira_loop_tree_root)
2363 continue;
2364 parent = a_node->parent;
2365 regno = ALLOCNO_REGNO (a);
2366 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2367 ira_assert (ALLOCNO_CAP (a) != NULL);
2368 else if (ALLOCNO_CAP (a) == NULL)
2369 ira_assert (parent->regno_allocno_map[regno] != NULL);
2371 FOR_EACH_ALLOCNO (a, ai)
2373 regno = ALLOCNO_REGNO (a);
2374 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2376 ira_object_t obj;
2377 ira_allocno_object_iterator oi;
2379 ira_regno_allocno_map[regno] = a;
2380 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2381 ALLOCNO_CAP_MEMBER (a) = NULL;
2382 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2383 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2384 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2385 #ifdef STACK_REGS
2386 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2387 ALLOCNO_NO_STACK_REG_P (a) = true;
2388 #endif
2390 else
2391 finish_allocno (a);
2393 if (merged_p)
2394 ira_rebuild_start_finish_chains ();
2397 /* Remove loops from consideration.  We remove all loops except for
2398    root if ALL_P or loops for which a separate allocation will not
2399    improve the result.  We have to do this after allocno creation and
2400    their costs and allocno class evaluation because only after that
2401    the register pressure can be known and is calculated.  */
/* Driver: mark loop nodes for removal, prune the loop tree, migrate
   or merge the affected allocnos, then free the removed nodes.  */
2402 static void
2403 remove_unnecessary_regions (bool all_p)
2405 if (current_loops == NULL)
2406 return;
2407 if (all_p)
2408 mark_all_loops_for_removal ();
2409 else
2410 mark_loops_for_removal ();
2411 children_vec.create(last_basic_block + number_of_loops (cfun));
2412 removed_loop_vec.create(last_basic_block + number_of_loops (cfun));
2413 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
2414 children_vec.release ();
2415 if (all_p)
2416 remove_low_level_allocnos ();
2417 else
2418 remove_unnecessary_allocnos ();
/* Removed nodes were only collected above; free them after the
   allocnos referencing them have been migrated or destroyed.  */
2419 while (removed_loop_vec.length () > 0)
2420 finish_loop_tree_node (removed_loop_vec.pop ());
2421 removed_loop_vec.release ();
2426 /* At this point true value of allocno attribute bad_spill_p means
2427    that there is an insn where allocno occurs and where the allocno
2428    can not be used as memory.  The function updates the attribute, now
2429    it can be true only for allocnos which can not be used as memory in
2430    an insn and in whose live ranges there are no other allocno deaths.
2431    Spilling allocnos with true value will not improve the code because
2432    it will not make other allocnos colorable and additional reloads
2433    for the corresponding pseudo will be generated in reload pass for
2434    each insn it occurs in.
2436    This is a trick mentioned in one classic article of Chaitin etc
2437    which is frequently omitted in other implementations of RA based on
2438    graph coloring.  */
2439 static void
2440 update_bad_spill_attribute (void)
2442 int i;
2443 ira_allocno_t a;
2444 ira_allocno_iterator ai;
2445 ira_allocno_object_iterator aoi;
2446 ira_object_t obj;
2447 live_range_t r;
2448 enum reg_class aclass;
2449 bitmap_head dead_points[N_REG_CLASSES];
/* Collect, per allocno class, the program points where some live
   range of that class finishes (i.e. where an allocno dies).  */
2451 for (i = 0; i < ira_allocno_classes_num; i++)
2453 aclass = ira_allocno_classes[i];
2454 bitmap_initialize (&dead_points[aclass], &reg_obstack);
2456 FOR_EACH_ALLOCNO (a, ai)
2458 aclass = ALLOCNO_CLASS (a);
2459 if (aclass == NO_REGS)
2460 continue;
2461 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2462 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2463 bitmap_set_bit (&dead_points[aclass], r->finish);
2465 FOR_EACH_ALLOCNO (a, ai)
2467 aclass = ALLOCNO_CLASS (a);
2468 if (aclass == NO_REGS)
2469 continue;
2470 if (! ALLOCNO_BAD_SPILL_P (a))
2471 continue;
2472 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2474 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2476 for (i = r->start + 1; i < r->finish; i++)
2477 if (bitmap_bit_p (&dead_points[aclass], i))
2478 break;
2479 if (i < r->finish)
2480 break;
/* A dead point of another allocno of the same class strictly inside
   one of A's live ranges means spilling A could make other allocnos
   colorable, so A is no longer a bad spill candidate.  */
2482 if (r != NULL)
2484 ALLOCNO_BAD_SPILL_P (a) = false;
2485 break;
2489 for (i = 0; i < ira_allocno_classes_num; i++)
2491 aclass = ira_allocno_classes[i];
2492 bitmap_clear (&dead_points[aclass]);
2498 /* Set up minimal and maximal live range points for allocnos.  */
/* First compute OBJECT_MIN/OBJECT_MAX of every object from its own
   (sorted) live range list, then walk the regno allocno lists
   bottom-up widening the ranges of caps and parent allocnos so they
   cover their members/children.  */
2499 static void
2500 setup_min_max_allocno_live_range_point (void)
2502 int i;
2503 ira_allocno_t a, parent_a, cap;
2504 ira_allocno_iterator ai;
2505 #ifdef ENABLE_IRA_CHECKING
2506 ira_object_iterator oi;
2507 ira_object_t obj;
2508 #endif
2509 live_range_t r;
2510 ira_loop_tree_node_t parent;
2512 FOR_EACH_ALLOCNO (a, ai)
2514 int n = ALLOCNO_NUM_OBJECTS (a);
2516 for (i = 0; i < n; i++)
2518 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2519 r = OBJECT_LIVE_RANGES (obj);
2520 if (r == NULL)
2521 continue;
/* Ranges are kept ordered, so the head holds the max finish and
   the tail the min start.  */
2522 OBJECT_MAX (obj) = r->finish;
2523 for (; r->next != NULL; r = r->next)
2525 OBJECT_MIN (obj) = r->start;
2528 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2529 for (a = ira_regno_allocno_map[i];
2530 a != NULL;
2531 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2533 int j;
2534 int n = ALLOCNO_NUM_OBJECTS (a);
2536 for (j = 0; j < n; j++)
2538 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2539 ira_object_t parent_obj;
2541 if (OBJECT_MAX (obj) < 0)
2542 continue;
2543 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2544 /* Accumulation of range info.  */
2545 if (ALLOCNO_CAP (a) != NULL)
2547 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2549 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2550 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2551 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2552 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2553 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2555 continue;
2557 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2558 continue;
2559 parent_a = parent->regno_allocno_map[i];
2560 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2561 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2562 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2563 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2564 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2567 #ifdef ENABLE_IRA_CHECKING
/* Verify every object's min/max fell inside [0, ira_max_point].  */
2568 FOR_EACH_OBJECT (obj, oi)
2570 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2571 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
2572 continue;
2573 gcc_unreachable ();
2575 #endif
2578 /* Sort objects according to their live ranges.  Objects are ordered
2579    according to their range start (min).  Objects with the same start
2580    are ordered according to their range finish (max).  Remaining ties
2581    are broken by allocno number so that the result of qsort leaves
2582    nothing to chance.  */
2583 static int
2584 object_range_compare_func (const void *v1p, const void *v2p)
2586 int diff;
2587 ira_object_t obj1 = *(const ira_object_t *) v1p;
2588 ira_object_t obj2 = *(const ira_object_t *) v2p;
2589 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2590 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2592 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2593 return diff;
2594 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2595 return diff;
2596 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2599 /* Sort ira_object_id_map and set up conflict id of allocnos.  */
/* After sorting by live range, an object's conflict id equals its
   position in the map; trailing unused slots are cleared.  */
2600 static void
2601 sort_conflict_id_map (void)
2603 int i, num;
2604 ira_allocno_t a;
2605 ira_allocno_iterator ai;
2607 num = 0;
2608 FOR_EACH_ALLOCNO (a, ai)
2610 ira_allocno_object_iterator oi;
2611 ira_object_t obj;
2613 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2614 ira_object_id_map[num++] = obj;
2616 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2617 object_range_compare_func);
2618 for (i = 0; i < num; i++)
2620 ira_object_t obj = ira_object_id_map[i];
2622 gcc_assert (obj != NULL);
2623 OBJECT_CONFLICT_ID (obj) = i;
2625 for (i = num; i < ira_objects_num; i++)
2626 ira_object_id_map[i] = NULL;
2629 /* Set up minimal and maximal conflict ids of allocnos with which
2630    given allocno can conflict.  */
/* After this function OBJECT_MIN/OBJECT_MAX no longer hold range
   points; they hold the smallest and largest conflict ids of objects
   that can possibly conflict with the object, a cheap prefilter used
   when building the conflict graph.  */
2631 static void
2632 setup_min_max_conflict_allocno_ids (void)
2634 int aclass;
2635 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2636 int *live_range_min, *last_lived;
2637 int word0_min, word0_max;
2638 ira_allocno_t a;
2639 ira_allocno_iterator ai;
/* Forward pass: for each object, MIN becomes the id of the first
   not-yet-finished object whose range can still overlap it.  The
   original OBJECT_MIN is saved in live_range_min for the second
   pass.  */
2641 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2642 aclass = -1;
2643 first_not_finished = -1;
2644 for (i = 0; i < ira_objects_num; i++)
2646 ira_object_t obj = ira_object_id_map[i];
2648 if (obj == NULL)
2649 continue;
2651 a = OBJECT_ALLOCNO (obj);
2653 if (aclass < 0)
2655 aclass = ALLOCNO_CLASS (a);
2656 min = i;
2657 first_not_finished = i;
2659 else
2661 start = OBJECT_MIN (obj);
2662 /* If we skip an allocno, the allocno with smaller ids will
2663    be also skipped because of the secondary sorting the
2664    range finishes (see function
2665    object_range_compare_func).  */
2666 while (first_not_finished < i
2667 && start > OBJECT_MAX (ira_object_id_map
2668 [first_not_finished]))
2669 first_not_finished++;
2670 min = first_not_finished;
2672 if (min == i)
2673 /* We could increase min further in this case but it is good
2674    enough.  */
2675 min++;
2676 live_range_min[i] = OBJECT_MIN (obj);
2677 OBJECT_MIN (obj) = min;
/* Backward pass: for each object, MAX becomes the id of the last
   object whose range can still overlap it, derived from last_lived[],
   which maps program points to the smallest id seen living there.  */
2679 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2680 aclass = -1;
2681 filled_area_start = -1;
2682 for (i = ira_objects_num - 1; i >= 0; i--)
2684 ira_object_t obj = ira_object_id_map[i];
2686 if (obj == NULL)
2687 continue;
2689 a = OBJECT_ALLOCNO (obj);
2690 if (aclass < 0)
2692 aclass = ALLOCNO_CLASS (a);
2693 for (j = 0; j < ira_max_point; j++)
2694 last_lived[j] = -1;
2695 filled_area_start = ira_max_point;
2697 min = live_range_min[i];
2698 finish = OBJECT_MAX (obj);
2699 max = last_lived[finish];
2700 if (max < 0)
2701 /* We could decrease max further in this case but it is good
2702    enough.  */
2703 max = OBJECT_CONFLICT_ID (obj) - 1;
2704 OBJECT_MAX (obj) = max;
2705 /* In filling, we can go beyond the range finish to recognize
2706    intersection quickly: if the finish of a subsequently
2707    processed object (it has a smaller conflict id) is beyond
2708    this range finish then they definitely intersect
2709    (the reason for this is that objects with bigger conflict id
2710    have their range starts not smaller than objects with
2711    smaller ids).  */
2712 for (j = min; j < filled_area_start; j++)
2713 last_lived[j] = i;
2714 filled_area_start = min;
2716 ira_free (last_lived);
2717 ira_free (live_range_min);
2719 /* For allocnos with more than one object, we may later record extra conflicts in
2720    subobject 0 that we cannot really know about here.
2721    For now, simply widen the min/max range of these subobjects.  */
2723 word0_min = INT_MAX;
2724 word0_max = INT_MIN;
2726 FOR_EACH_ALLOCNO (a, ai)
2728 int n = ALLOCNO_NUM_OBJECTS (a);
2729 ira_object_t obj0;
2731 if (n < 2)
2732 continue;
2733 obj0 = ALLOCNO_OBJECT (a, 0);
2734 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2735 word0_min = OBJECT_CONFLICT_ID (obj0);
2736 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2737 word0_max = OBJECT_CONFLICT_ID (obj0);
2739 FOR_EACH_ALLOCNO (a, ai)
2741 int n = ALLOCNO_NUM_OBJECTS (a);
2742 ira_object_t obj0;
2744 if (n < 2)
2745 continue;
2746 obj0 = ALLOCNO_OBJECT (a, 0);
2747 if (OBJECT_MIN (obj0) > word0_min)
2748 OBJECT_MIN (obj0) = word0_min;
2749 if (OBJECT_MAX (obj0) < word0_max)
2750 OBJECT_MAX (obj0) = word0_max;
/* Create cap allocnos in parent loop tree nodes for allocnos (and
   existing caps) that are local to their loop, i.e. not on the loop
   border and not already capped.  */
2756 static void
2757 create_caps (void)
2759 ira_allocno_t a;
2760 ira_allocno_iterator ai;
2761 ira_loop_tree_node_t loop_tree_node;
2763 FOR_EACH_ALLOCNO (a, ai)
2765 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2766 continue;
/* A cap itself gets a cap in the next outer node, propagating the
   chain towards the root.  */
2767 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2768 create_cap_allocno (a);
2769 else if (ALLOCNO_CAP (a) == NULL)
2771 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2772 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2773 create_cap_allocno (a);
2780 /* The page contains code transforming a multi-region internal
2781    representation (IR) into a one-region IR which is necessary for
2782    reload.  This transformation is called IR flattening.  We might
2783    just rebuild the IR for one region but we don't do it because it
2784    takes a lot of time.  */
2786 /* Map: regno -> allocnos which will finally represent the regno for
2787    IR with one region.  */
2788 static ira_allocno_t *regno_top_level_allocno_map;
2790 /* Find the allocno that corresponds to A at a level one higher up in the
2791    loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
2792 ira_allocno_t
2793 ira_parent_allocno (ira_allocno_t a)
2795 ira_loop_tree_node_t parent;
/* Caps have no parent allocno in the regno maps.  */
2797 if (ALLOCNO_CAP (a) != NULL)
2798 return NULL;
2800 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2801 if (parent == NULL)
2802 return NULL;
2804 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2807 /* Find the allocno that corresponds to A at a level one higher up in the
2808    loop tree.  If ALLOCNO_CAP is set for A, return that.  */
2809 ira_allocno_t
2810 ira_parent_or_cap_allocno (ira_allocno_t a)
2812 if (ALLOCNO_CAP (a) != NULL)
2813 return ALLOCNO_CAP (a);
2815 return ira_parent_allocno (a);
2818 /* Process all allocnos originated from pseudo REGNO and copy live
2819    ranges, hard reg conflicts, and allocno stack reg attributes from
2820    low level allocnos to final allocnos which are destinations of
2821    removed stores at a loop exit.  Return true if we copied live
2822    ranges.  */
2823 static bool
2824 copy_info_to_removed_store_destinations (int regno)
2826 ira_allocno_t a;
2827 ira_allocno_t parent_a = NULL;
2828 ira_loop_tree_node_t parent;
2829 bool merged_p;
2831 merged_p = false;
2832 for (a = ira_regno_allocno_map[regno];
2833 a != NULL;
2834 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2836 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
2837 /* This allocno will be removed.  */
2838 continue;
2840 /* Caps will be removed.  */
2841 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
/* Walk up the loop tree to the nearest surviving allocno that is a
   mem-optimized store destination for this regno.  */
2842 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2843 parent != NULL;
2844 parent = parent->parent)
2845 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
2846 || (parent_a
2847 == regno_top_level_allocno_map[REGNO
2848 (allocno_emit_reg (parent_a))]
2849 && ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
2850 break;
2851 if (parent == NULL || parent_a == NULL)
2852 continue;
/* Ranges and conflicts are copied (not moved) because A itself
   survives as the final allocno of its own pseudo.  */
2854 copy_allocno_live_ranges (a, parent_a);
2855 merge_hard_reg_conflicts (a, parent_a, true);
2857 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2858 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2859 += ALLOCNO_CALLS_CROSSED_NUM (a);
2860 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2861 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2862 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2863 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2864 merged_p = true;
2866 return merged_p;
2869 /* Flatten the IR.  In other words, this function transforms IR as if
2870    it were built with one region (without loops).  We could make it
2871    much simpler by rebuilding IR with one region, but unfortunately it
2872    takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
2873    IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
2874    IRA_MAX_POINT before emitting insns on the loop borders.  */
2875 void
2876 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
2878 int i, j;
2879 bool keep_p;
2880 int hard_regs_num;
2881 bool new_pseudos_p, merged_p, mem_dest_p;
2882 unsigned int n;
2883 enum reg_class aclass;
2884 ira_allocno_t a, parent_a, first, second, node_first, node_second;
2885 ira_copy_t cp;
2886 ira_loop_tree_node_t node;
2887 live_range_t r;
2888 ira_allocno_iterator ai;
2889 ira_copy_iterator ci;
2891 regno_top_level_allocno_map
2892 = (ira_allocno_t *) ira_allocate (max_reg_num ()
2893 * sizeof (ira_allocno_t));
2894 memset (regno_top_level_allocno_map, 0,
2895 max_reg_num () * sizeof (ira_allocno_t));
2896 new_pseudos_p = merged_p = false;
/* Phase 1: seed each object's total conflict set from its local one
   (and likewise the stack-reg flag) before any merging below.  */
2897 FOR_EACH_ALLOCNO (a, ai)
2899 ira_allocno_object_iterator oi;
2900 ira_object_t obj;
2902 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2903 /* Caps are not in the regno allocno maps and they are never
2904    will be transformed into allocnos existing after IR
2905    flattening.  */
2906 continue;
2907 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2908 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2909 OBJECT_CONFLICT_HARD_REGS (obj));
2910 #ifdef STACK_REGS
2911 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
2912 #endif
2914 /* Fix final allocno attributes.  */
/* Phase 2: pick the final representative allocno per emit-time regno;
   merge same-regno children into parents, or undo the propagated
   statistics when the child got a new pseudo from ira-emit.  */
2915 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2917 mem_dest_p = false;
2918 for (a = ira_regno_allocno_map[i];
2919 a != NULL;
2920 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2922 ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
2924 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2925 if (data->somewhere_renamed_p)
2926 new_pseudos_p = true;
2927 parent_a = ira_parent_allocno (a);
2928 if (parent_a == NULL)
2930 ALLOCNO_COPIES (a) = NULL;
2931 regno_top_level_allocno_map[REGNO (data->reg)] = a;
2932 continue;
2934 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
2936 if (data->mem_optimized_dest != NULL)
2937 mem_dest_p = true;
2938 parent_data = ALLOCNO_EMIT_DATA (parent_a);
2939 if (REGNO (data->reg) == REGNO (parent_data->reg))
2941 merge_hard_reg_conflicts (a, parent_a, true);
2942 move_allocno_live_ranges (a, parent_a);
2943 merged_p = true;
2944 parent_data->mem_optimized_dest_p
2945 = (parent_data->mem_optimized_dest_p
2946 || data->mem_optimized_dest_p);
2947 continue;
2949 new_pseudos_p = true;
/* Different pseudo than the parent: subtract A's contribution from
   every ancestor, since it was propagated up during build.  */
2950 for (;;)
2952 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
2953 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
2954 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
2955 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2956 -= ALLOCNO_CALLS_CROSSED_NUM (a);
2957 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2958 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2959 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2960 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2961 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
2962 && ALLOCNO_NREFS (parent_a) >= 0
2963 && ALLOCNO_FREQ (parent_a) >= 0);
2964 aclass = ALLOCNO_CLASS (parent_a);
2965 hard_regs_num = ira_class_hard_regs_num[aclass];
2966 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
2967 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
2968 for (j = 0; j < hard_regs_num; j++)
2969 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
2970 -= ALLOCNO_HARD_REG_COSTS (a)[j];
2971 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
2972 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
2973 for (j = 0; j < hard_regs_num; j++)
2974 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
2975 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
2976 ALLOCNO_CLASS_COST (parent_a)
2977 -= ALLOCNO_CLASS_COST (a);
2978 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
2979 parent_a = ira_parent_allocno (parent_a);
2980 if (parent_a == NULL)
2981 break;
2983 ALLOCNO_COPIES (a) = NULL;
2984 regno_top_level_allocno_map[REGNO (data->reg)] = a;
2986 if (mem_dest_p && copy_info_to_removed_store_destinations (i))
2987 merged_p = true;
2989 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
2990 if (merged_p || ira_max_point_before_emit != ira_max_point)
2991 ira_rebuild_start_finish_chains ();
/* Phase 3: if new pseudos appeared, the conflict graph is stale --
   rebuild it from scratch by sweeping the live ranges.  */
2992 if (new_pseudos_p)
2994 sparseset objects_live;
2996 /* Rebuild conflicts.  */
2997 FOR_EACH_ALLOCNO (a, ai)
2999 ira_allocno_object_iterator oi;
3000 ira_object_t obj;
3002 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3003 || ALLOCNO_CAP_MEMBER (a) != NULL)
3004 continue;
3005 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3007 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3008 ira_assert (r->object == obj);
3009 clear_conflicts (obj);
3012 objects_live = sparseset_alloc (ira_objects_num);
3013 for (i = 0; i < ira_max_point; i++)
3015 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3017 ira_object_t obj = r->object;
3019 a = OBJECT_ALLOCNO (obj);
3020 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3021 || ALLOCNO_CAP_MEMBER (a) != NULL)
3022 continue;
3024 aclass = ALLOCNO_CLASS (a);
3025 sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
3026 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3028 ira_object_t live_obj = ira_object_id_map[n];
3029 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3030 enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3032 if (ira_reg_classes_intersect_p[aclass][live_aclass]
3033 /* Don't set up conflict for the allocno with itself.  */
3034 && live_a != a)
3035 ira_add_conflict (obj, live_obj);
3039 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3040 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3042 sparseset_free (objects_live);
3043 compress_conflict_vecs ();
3045 /* Mark some copies for removing and change allocnos in the rest
3046    copies.  */
/* Phase 4: retarget surviving copies at the final allocnos; mark the
   rest (cap copies and copies propagated across renamed pseudos) by
   clearing their loop_tree_node.  */
3047 FOR_EACH_COPY (cp, ci)
3049 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3050 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3052 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3053 fprintf
3054 (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
3055 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3056 ALLOCNO_NUM (cp->first),
3057 REGNO (allocno_emit_reg (cp->first)),
3058 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3059 ALLOCNO_NUM (cp->second),
3060 REGNO (allocno_emit_reg (cp->second)));
3061 cp->loop_tree_node = NULL;
3062 continue;
3064 first
3065 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3066 second
3067 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3068 node = cp->loop_tree_node;
3069 if (node == NULL)
3070 keep_p = true; /* It copy generated in ira-emit.c.  */
3071 else
3073 /* Check that the copy was not propagated from level on
3074    which we will have different pseudos.  */
3075 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3076 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3077 keep_p = ((REGNO (allocno_emit_reg (first))
3078 == REGNO (allocno_emit_reg (node_first)))
3079 && (REGNO (allocno_emit_reg (second))
3080 == REGNO (allocno_emit_reg (node_second))));
3082 if (keep_p)
3084 cp->loop_tree_node = ira_loop_tree_root;
3085 cp->first = first;
3086 cp->second = second;
3088 else
3090 cp->loop_tree_node = NULL;
3091 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3092 fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
3093 cp->num, ALLOCNO_NUM (cp->first),
3094 REGNO (allocno_emit_reg (cp->first)),
3095 ALLOCNO_NUM (cp->second),
3096 REGNO (allocno_emit_reg (cp->second)));
3099 /* Remove unnecessary allocnos on lower levels of the loop tree.  */
3100 FOR_EACH_ALLOCNO (a, ai)
3102 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3103 || ALLOCNO_CAP_MEMBER (a) != NULL)
3105 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3106 fprintf (ira_dump_file, "      Remove a%dr%d\n",
3107 ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3108 finish_allocno (a);
3109 continue;
3111 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3112 ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3113 ALLOCNO_CAP (a) = NULL;
3114 /* Restore updated costs for assignments from reload.  */
3115 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3116 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3117 if (! ALLOCNO_ASSIGNED_P (a))
3118 ira_free_allocno_updated_costs (a);
3119 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3120 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3122 /* Remove unnecessary copies.  */
3123 FOR_EACH_COPY (cp, ci)
3125 if (cp->loop_tree_node == NULL)
3127 ira_copies[cp->num] = NULL;
3128 finish_copy (cp);
3129 continue;
3131 ira_assert
3132 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3133 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3134 ira_add_allocno_copy_to_list (cp);
3135 ira_swap_allocno_copy_ends_if_necessary (cp);
3137 rebuild_regno_allocno_maps ();
3138 if (ira_max_point != ira_max_point_before_emit)
3139 ira_compress_allocno_live_ranges ();
3140 ira_free (regno_top_level_allocno_map);
3145 #ifdef ENABLE_IRA_CHECKING
3146 /* Check creation of all allocnos.  Allocnos on lower levels should
3147    have allocnos or caps on all upper levels.  */
3148 static void
3149 check_allocno_creation (void)
3151 ira_allocno_t a;
3152 ira_allocno_iterator ai;
3153 ira_loop_tree_node_t loop_tree_node;
3155 FOR_EACH_ALLOCNO (a, ai)
3157 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3158 ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3159 ALLOCNO_NUM (a)));
3160 if (loop_tree_node == ira_loop_tree_root)
3161 continue;
/* A cap member must have its cap; an uncapped allocno must have a
   parent allocno and be on the loop border.  */
3162 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3163 ira_assert (ALLOCNO_CAP (a) != NULL);
3164 else if (ALLOCNO_CAP (a) == NULL)
3165 ira_assert (loop_tree_node->parent
3166 ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3167 && bitmap_bit_p (loop_tree_node->border_allocnos,
3168 ALLOCNO_NUM (a)));
3171 #endif
3173 /* Identify allocnos which prefer a register class with a single hard register.
3174    Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3175    less likely to use the preferred singleton register.  */
3176 static void
3177 update_conflict_hard_reg_costs (void)
3179 ira_allocno_t a;
3180 ira_allocno_iterator ai;
3181 int i, index, min;
3183 FOR_EACH_ALLOCNO (a, ai)
3185 reg_class_t aclass = ALLOCNO_CLASS (a);
3186 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3187 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3188 if (singleton < 0)
3189 continue;
3190 index = ira_class_hard_reg_index[(int) aclass][singleton];
3191 if (index < 0)
3192 continue;
3193 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3194 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3195 continue;
/* MIN is the cheapest cost among hard regs that are worse than the
   class cost; the singleton's conflict cost is lowered by the gap
   between MIN and the class cost.  */
3196 min = INT_MAX;
3197 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3198 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3199 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3200 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3201 if (min == INT_MAX)
3202 continue;
3203 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3204 aclass, 0);
3205 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3206 -= min - ALLOCNO_CLASS_COST (a);
3210 /* Create a internal representation (IR) for IRA (allocnos, copies,
3211 loop tree nodes). The function returns TRUE if we generate loop
3212 structure (besides nodes representing all function and the basic
3213 blocks) for regional allocation. A true return means that we
3214 really need to flatten IR before the reload. */
3215 bool
3216 ira_build (void)
3218 bool loops_p;
3220 df_analyze ();
3221 initiate_cost_vectors ();
3222 initiate_allocnos ();
3223 initiate_copies ();
3224 create_loop_tree_nodes ();
3225 form_loop_tree ();
3226 create_allocnos ();
3227 ira_costs ();
3228 create_allocno_objects ();
3229 ira_create_allocno_live_ranges ();
3230 remove_unnecessary_regions (false);
3231 ira_compress_allocno_live_ranges ();
3232 update_bad_spill_attribute ();
3233 loops_p = more_one_region_p ();
3234 if (loops_p)
3236 propagate_allocno_info ();
3237 create_caps ();
3239 ira_tune_allocno_costs ();
3240 #ifdef ENABLE_IRA_CHECKING
3241 check_allocno_creation ();
3242 #endif
3243 setup_min_max_allocno_live_range_point ();
3244 sort_conflict_id_map ();
3245 setup_min_max_conflict_allocno_ids ();
3246 ira_build_conflicts ();
3247 update_conflict_hard_reg_costs ();
3248 if (! ira_conflicts_p)
3250 ira_allocno_t a;
3251 ira_allocno_iterator ai;
3253 /* Remove all regions but root one. */
3254 if (loops_p)
3256 remove_unnecessary_regions (true);
3257 loops_p = false;
3259 /* We don't save hard registers around calls for fast allocation
3260 -- add caller clobbered registers as conflicting ones to
3261 allocno crossing calls. */
3262 FOR_EACH_ALLOCNO (a, ai)
3263 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3264 ior_hard_reg_conflicts (a, &call_used_reg_set);
3266 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3267 print_copies (ira_dump_file);
3268 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3270 int n, nr, nr_big;
3271 ira_allocno_t a;
3272 live_range_t r;
3273 ira_allocno_iterator ai;
3275 n = 0;
3276 nr = 0;
3277 nr_big = 0;
3278 FOR_EACH_ALLOCNO (a, ai)
3280 int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3282 if (nobj > 1)
3283 nr_big++;
3284 for (j = 0; j < nobj; j++)
3286 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3287 n += OBJECT_NUM_CONFLICTS (obj);
3288 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3289 nr++;
3292 fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
3293 current_loops == NULL ? 1 : number_of_loops (cfun),
3294 n_basic_blocks, ira_max_point);
3295 fprintf (ira_dump_file,
3296 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3297 ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3299 return loops_p;
3302 /* Release the data created by function ira_build. */
3303 void
3304 ira_destroy (void)
3306 finish_loop_tree_nodes ();
3307 finish_copies ();
3308 finish_allocnos ();
3309 finish_cost_vectors ();
3310 ira_finish_allocno_live_ranges ();