/* Building internal representation for IRA.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "params.h"
#include "df.h"
#include "reload.h"
#include "sparseset.h"
#include "ira-int.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
				     ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;
/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;
/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;
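
/* A sketch of the map's shape (illustrative values only, not real
   data): for a pseudo with regno 137 that lives in two nested
   regions, ira_regno_allocno_map[137] points at the innermost allocno
   and the chain continues through ALLOCNO_NEXT_REGNO_ALLOCNO, e.g.

     ira_regno_allocno_map[137] -> a12 (loop 2) -> a5 (root) -> NULL

   Each loop tree node additionally has its own regno_allocno_map
   holding the allocno of that region, here a12 for loop 2 and a5 for
   the root.  */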
/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;
/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root, which corresponds to the whole function) are not
   allocated, but nodes are still allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
		     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  edges.release ();
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
	  && ira_loop_tree_root != &ira_loop_nodes[i])
	return true;
  return false;
}
/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}
/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
	ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
	ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add a
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns the maximal value of level in the tree + 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
	max_height = height;
    }
  return max_height;
}
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
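
/* For intuition (an illustrative example, not real data): for a
   function whose CFG is  bb2 -> bb3 (loop 1: bb3, bb4) -> bb5, the
   resulting tree is

     root (loop 0)
      +- bb2
      +- loop 1
      |   +- bb3
      |   +- bb4
      +- bb5

   with ira_loop_tree_height == 2: the root is at level 0 and loop 1
   at level 1.  */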
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;

/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
			 sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
static ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
    }
}
/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  enum machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
/* Merge hard register conflict information for all objects associated
   with allocno FROM into the corresponding objects associated with
   allocno TO.  If TOTAL_ONLY is true, we only merge
   OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
			  OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
			OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
/* Update hard register conflict information for all objects associated
   with A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such case because it does not result
       in allocation.  */
    return false;

  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
	  < 3 * nw * sizeof (IRA_INT_TYPE));
}
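
/* A worked instance of the inequality above (illustrative sizes only,
   assuming sizeof (ira_object_t) == 8 and a 32-bit IRA_INT_TYPE):
   with min == 0 and max == 255 the bit vector needs nw == 8 words,
   so the right-hand side is 3 * 8 * 4 == 96, and a vector wins only
   while 2 * 8 * (num + 1) < 96, i.e. for num <= 4 conflicts.  The
   weights 2 and 3 mean a vector is preferred until it costs roughly
   1.5 times the bit vector's memory.  */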
/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++;  /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos, whatever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}
/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);

      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
	{
	  ira_object_t *newvec;

	  size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
	  newvec = (ira_object_t *) ira_allocate (size);
	  memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
	  ira_free (vec);
	  vec = newvec;
	  OBJECT_CONFLICT_ARRAY (obj1) = vec;
	  OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	}
      vec[curr_num] = obj2;
      vec[curr_num + 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
	{
	  /* Expand head of the bit vector.  */
	  added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
	  nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
	    {
	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		       vec, nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	    }
	  else
	    {
	      size
		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	      memset ((char *) vec
		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
	}
      else if (OBJECT_MAX (obj1) < id)
	{
	  nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = nw * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
	    {
	      /* Expand tail of the bit vector.  */
	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
		      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
		      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MAX (obj1) = id;
	}
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
	{
	  conflict_check[id] = curr_conflict_check_tick;
	  vec[j++] = conflict_obj;
	}
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    if (OBJECT_CONFLICT_VEC_P (obj))
      compress_conflict_vec (obj);
  ira_free (conflict_check);
}
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
		       live_range_t next)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}

/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;

  p = ira_create_live_range (object, start, finish,
			     OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}
/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  *p = *r;
  return p;
}

/* Copy allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
	first = p;
      else
	last->next = p;
      last = p;
    }
  return first;
}
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
	{
	  temp = r1;
	  r1 = r2;
	  r2 = temp;
	}
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    ira_assert (last->next == NULL);
  return first;
}
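
/* A small worked example of the merge (illustrative numbers only):
   merging ranges [1,4] and [3,6] yields [1,6], since they overlap.
   Adjacent ranges also fold: because the test is
   r1->start <= r2->finish + 1, the ranges [5,6] and [7,9] merge into
   [5,9], while [5,6] and [8,9] stay separate.  */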
/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
	r1 = r1->next;
      else if (r2->start > r1->finish)
	r2 = r2->next;
      else
	return true;
    }
  return false;
}
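
/* The walk above is a classic two-pointer sweep over ordered lists
   (illustrative trace): with r1 = [20,25]->[5,8] and r2 =
   [10,12]->[1,3], 20 > 12 drops [20,25], then 10 > 8 drops [10,12],
   then 5 > 3 drops [5,8]; r1 is exhausted, so the ranges do not
   intersect and the result is false.  */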
/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}

/* Free list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}
/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
	ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
			  aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}

/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
/* Pools for allocno preferences.  */
static alloc_pool pref_pool;

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;

/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_pool
    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}
/* Return pref for A and HARD_REGNO if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}
/* Create and return pref with given attributes A, HARD_REGNO, and FREQ.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = (ira_pref_t) pool_alloc (pref_pool);
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}
/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}
/* Create (or update frequency if the pref already exists) the pref of
   allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    return;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}
/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, "  pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
	   ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
	   pref->hard_regno, pref->freq);
}

/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}

/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}

/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}

/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}

/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pool_free (pref_pool, pref);
}
/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
	     pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}
/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}

/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  free_alloc_pool (pref_pool);
}
/* Pools for copies.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;

/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
		   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a1)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
	  && cp->loop_tree_node == loop_tree_node)
	return cp;
    }
  return NULL;
}
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		 bool constraint_p, rtx insn,
		 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}
1437 add_allocno_copy_to_list (ira_copy_t cp
)
1439 ira_allocno_t first
= cp
->first
, second
= cp
->second
;
1441 cp
->prev_first_allocno_copy
= NULL
;
1442 cp
->prev_second_allocno_copy
= NULL
;
1443 cp
->next_first_allocno_copy
= ALLOCNO_COPIES (first
);
1444 if (cp
->next_first_allocno_copy
!= NULL
)
1446 if (cp
->next_first_allocno_copy
->first
== first
)
1447 cp
->next_first_allocno_copy
->prev_first_allocno_copy
= cp
;
1449 cp
->next_first_allocno_copy
->prev_second_allocno_copy
= cp
;
1451 cp
->next_second_allocno_copy
= ALLOCNO_COPIES (second
);
1452 if (cp
->next_second_allocno_copy
!= NULL
)
1454 if (cp
->next_second_allocno_copy
->second
== second
)
1455 cp
->next_second_allocno_copy
->prev_second_allocno_copy
= cp
;
1457 cp
->next_second_allocno_copy
->prev_first_allocno_copy
= cp
;
1459 ALLOCNO_COPIES (first
) = cp
;
1460 ALLOCNO_COPIES (second
) = cp
;
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  ira_allocno_t temp;
  ira_copy_t temp_cp;

  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  temp = cp->first;
  cp->first = cp->second;
  cp->second = temp;

  temp_cp = cp->prev_first_allocno_copy;
  cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
  cp->prev_second_allocno_copy = temp_cp;

  temp_cp = cp->next_first_allocno_copy;
  cp->next_first_allocno_copy = cp->next_second_allocno_copy;
  cp->next_second_allocno_copy = temp_cp;
}
/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
		      bool constraint_p, rtx insn,
		      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
			loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
	   cp->insn != NULL
	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}

DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
1539 ira_debug_copy (ira_copy_t cp
)
1541 print_copy (stderr
, cp
);
1544 /* Print info about all copies into file F. */
1546 print_copies (FILE *f
)
1549 ira_copy_iterator ci
;
1551 FOR_EACH_COPY (cp
, ci
)
1555 /* Print info about all copies into stderr. */
1557 ira_debug_copies (void)
1559 print_copies (stderr
);
1562 /* Print info about copies involving allocno A into file F. */
1564 print_allocno_copies (FILE *f
, ira_allocno_t a
)
1566 ira_allocno_t another_a
;
1567 ira_copy_t cp
, next_cp
;
1569 fprintf (f
, " a%d(r%d):", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
1570 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
1574 next_cp
= cp
->next_first_allocno_copy
;
1575 another_a
= cp
->second
;
1577 else if (cp
->second
== a
)
1579 next_cp
= cp
->next_second_allocno_copy
;
1580 another_a
= cp
->first
;
1584 fprintf (f
, " cp%d:a%d(r%d)@%d", cp
->num
,
1585 ALLOCNO_NUM (another_a
), ALLOCNO_REGNO (another_a
), cp
->freq
);
1591 debug (ira_allocno
&ref
)
1593 print_allocno_copies (stderr
, &ref
);
1597 debug (ira_allocno
*ptr
)
1602 fprintf (stderr
, "<nil>\n");
1606 /* Print info about copies involving allocno A into stderr. */
1608 ira_debug_allocno_copies (ira_allocno_t a
)
1610 print_allocno_copies (stderr
, a
);
/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}

/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  free_alloc_pool (copy_pool);
}
/* Pools for cost vectors.  It is defined only for allocno classes.  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class.  */
void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass]
	= create_alloc_pool ("cost vectors",
			     sizeof (int) * ira_class_hard_regs_num[aclass],
			     100);
    }
}

/* Allocate and return a cost vector VEC for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}

/* Free a cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}

/* Finish work with hard register cost vectors.  Release allocation
   pool for each allocno class.  */
void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      free_alloc_pool (cost_vector_pool[aclass]);
    }
}
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbours in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
				  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
	 the flag to mark blocks we still have to visit to add them to
	 our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
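
/* A tiny illustration of the order produced (example blocks only):
   for a straight-line body bb2 -> bb3 -> bb4 the pre-order is
   (bb2 bb3 bb4).  The reverse-CFG DFS starts at bb4, chases
   predecessors, and pops bb2 first, giving the post-order
   (bb2 bb3 bb4); iterating over that result in reverse then visits
   bb4, bb3, bb2, exactly the backward scan the live range builder
   wants.  */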
/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */

void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
	 is set up such that nodes in the loop body appear in a pre-order
	 of their place in the CFG.  */
      for (subloop_node = loop_node->children;
	   subloop_node != NULL;
	   subloop_node = subloop_node->next)
	if (subloop_node->bb != NULL)
	  loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
	FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	  (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
	{
	  vec<ira_loop_tree_node_t> loop_rev_postorder =
	    ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
	  FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
	    (*postorder_func) (subloop_node);
	  loop_rev_postorder.release ();
	}
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
/* The basic block currently being processed.  */
static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  */
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
	{
	  ira_allocno_t a;

	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
	    a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

	  ALLOCNO_NREFS (a)++;
	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
	  if (output_p)
	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
	}
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
	create_loop_allocnos (e);
      edges.release ();
    }
}
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
		   loop_tree_node->modified_regnos);
}
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_CLASS_COST (parent_a)
	    += ALLOCNO_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
/* The page contains functions to remove some regions from a separate
   register allocation.  We remove regions whose separate allocation
   will hardly improve the result.  As a result we speed up regional
   register allocation.  */

/* The function changes the object in range list given by R to OBJ.  */
static void
change_object_in_range_list (live_range_t r, ira_object_t obj)
{
  for (; r != NULL; r = r->next)
    r->object = obj;
}
/* Move all live ranges associated with allocno FROM to allocno TO.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   "      Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
/* Return TRUE if NODE represents a loop with low register
   pressure.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
	  && ira_class_hard_regs_num[pclass] > 1)
	return false;
    }
  return true;
}
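/* Note that a pressure class containing a single hard register is
   deliberately ignored by the check above: with only one register in
   the class, a separate regional allocation cannot relieve the
   pressure anyway.  */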
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers
   because reg-stack.c cannot deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif
/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop_num - l2->loop_num;
}
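/* For example, loops L1 (already marked, freq 1000), L2 (freq 100,
   depth 1) and L3 (freq 100, depth 2) are ordered L1, L2, L3: marked
   loops first, then lower header frequency, then smaller depth, with
   loop_num as the final tie-breaker.  */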
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architectures it could
   help by choosing a better hard register in the loop but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loops with EH
   enter or exit edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do
   this for pseudos with caller saved hard registers in some cases
   but it is impossible to say here or during the top-down allocation
   pass what hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
/* Mark all loops but root for removing.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.  */
static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      removed_loop_vec.safe_push (node);
      return;
    }
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
/* Return TRUE if NODE is inside PARENT.  */
static bool
loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
{
  for (node = node->parent; node != NULL; node = node->parent)
    if (node == parent)
      return true;
  return false;
}
/* Sort allocnos according to their order in regno allocno list.  */
static int
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);

  if (loop_is_inside_p (n1, n2))
    return -1;
  else if (loop_is_inside_p (n2, n1))
    return 1;
  /* If allocnos are equally good, sort by allocno numbers, so that
     the results of qsort leave nothing to chance.  We put allocnos
     with higher number first in the list because it is the original
     order for allocnos from loops on the same levels.  */
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  */
static ira_allocno_t *regno_allocnos;
/* Restore allocno order for REGNO in the regno allocno list.  */
void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
/* Remove loops from consideration.  We remove all loops except for
   root if ALL_P or loops for which a separate allocation will not
   improve the result.  We have to do this after allocno creation and
   their costs and allocno class evaluation because only after that
   the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
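  /* Both vectors are sized for the worst case in which every basic
     block node and every loop node of the loop tree is visited.  */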
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}
/* At this point a true value of the allocno attribute bad_spill_p
   means that there is an insn where the allocno occurs and where the
   allocno can not be used as memory.  The function updates the
   attribute; now it can be true only for allocnos which can not be
   used as memory in an insn and in whose live ranges there are other
   allocnos' deaths.  Spilling allocnos with a true value will not
   improve the code because it will not make other allocnos colorable
   and additional reloads for the corresponding pseudo will be
   generated in the reload pass for each insn in which it occurs.

   This is a trick mentioned in the classic article of Chaitin et al.
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  if (r != NULL)
	    {
	      ALLOCNO_BAD_SPILL_P (a) = false;
	      break;
	    }
	}
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
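/* Example: if allocno A is live over points 10..20 and no allocno of
   A's class dies at points 11..19, spilling A cannot make another
   allocno colorable at any point where a register is needed, so
   bad_spill_p remains true and the colorer will avoid spilling A.  */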
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
/* Sort allocnos according to their live ranges.  Allocnos with
   smaller allocno class are put first unless we use priority
   coloring.  Allocnos with the same class are ordered according to
   their start (min).  Allocnos with the same start are ordered
   according to their finish (max).  */
static int
object_range_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_object_t obj1 = *(const ira_object_t *) v1p;
  ira_object_t obj2 = *(const ira_object_t *) v2p;
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);

  if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
    return diff;
  if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
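/* E.g. objects with (min, max) ranges (3,9), (5,7) and (3,12) end up
   in the order (3,9), (3,12), (5,7): smaller start first, then
   smaller finish, then allocno number as the tie-breaker.  */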
/* Sort ira_object_id_map and set up conflict id of allocnos.  */
static void
sort_conflict_id_map (void)
{
  int i, num;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	ira_object_id_map[num++] = obj;
    }
  qsort (ira_object_id_map, num, sizeof (ira_object_t),
	 object_range_compare_func);
  for (i = 0; i < num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      gcc_assert (obj != NULL);
      OBJECT_CONFLICT_ID (obj) = i;
    }
  for (i = num; i < ira_objects_num; i++)
    ira_object_id_map[i] = NULL;
}
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, allocnos with smaller ids will
	     also be skipped because of the secondary sorting by
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go further than the range finish of the
	 current object to recognize intersection quickly, because if
	 the finish of a subsequently processed range (which has a
	 smaller conflict id) lies beyond this range's finish, the two
	 ranges definitely intersect (the reason is that allocnos with
	 bigger conflict ids have their range starts not smaller than
	 allocnos with smaller ids).  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record
     extra conflicts in subobject 0 that we cannot really know about
     here.  For now, simply widen the min/max range of these
     subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
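/* After this function OBJECT_MIN and OBJECT_MAX no longer hold live
   range points: they hold the smallest and biggest conflict ids of
   objects whose live ranges can intersect the given object's range.
   The conflict builder uses them to limit the size of the per-object
   conflict bit vectors.  */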
/* Create caps for allocnos which live only inside non-root loop tree
   nodes and do not occur on region borders.  */
static void
create_caps (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	create_cap_allocno (a);
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
	    create_cap_allocno (a);
	}
    }
}
/* The page contains code transforming more-than-one-region internal
   representation (IR) to one region IR which is necessary for reload.
   This transformation is called IR flattening.  We might just rebuild
   the IR for one region but we don't do it because it takes a lot of
   time.  */

/* Map: regno -> allocnos which will finally represent the regno for
   IR with one region.  */
static ira_allocno_t *regno_top_level_allocno_map;
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
ira_allocno_t
ira_parent_allocno (ira_allocno_t a)
{
  ira_loop_tree_node_t parent;

  if (ALLOCNO_CAP (a) != NULL)
    return NULL;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  if (parent == NULL)
    return NULL;

  return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
}
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  If ALLOCNO_CAP is set for A, return that.  */
ira_allocno_t
ira_parent_or_cap_allocno (ira_allocno_t a)
{
  if (ALLOCNO_CAP (a) != NULL)
    return ALLOCNO_CAP (a);

  return ira_parent_allocno (a);
}
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they never will
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
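  /* When renaming created new pseudos, the conflicts recorded for the
     old allocno set are stale: the code below clears them and rebuilds
     the conflict graph from the merged live ranges.  */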
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with
			 itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* The copy was generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from a level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
/* Identify allocnos which prefer a register class with a single hard register.
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
   less likely to use the preferred singleton register.  */
static void
update_conflict_hard_reg_costs (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int i, index, min;

  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_class_t aclass = ALLOCNO_CLASS (a);
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
      int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
      if (singleton < 0)
	continue;
      index = ira_class_hard_reg_index[(int) aclass][singleton];
      if (index < 0)
	continue;
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
	continue;
      min = INT_MAX;
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
      if (min == INT_MAX)
	continue;
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				  aclass, 0);
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
	-= min - ALLOCNO_CLASS_COST (a);
    }
}
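/* For instance, if ALLOCNO_CLASS_COST is 1000 and the cheapest of the
   more expensive alternatives costs 1400, the singleton's entry is
   decreased by 400 -- the amount the allocno stands to lose if a
   conflicting allocno takes its preferred register.  */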
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing the whole function and the
   basic blocks) for regional allocation.  A true return means that
   we really need to flatten IR before the reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}