/* Building internal representation for IRA.
   Copyright (C) 2006-2020 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "cfgloop.h"

static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
                                     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;
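
/* Editorial illustration (not from the original sources): all allocnos
   created for one pseudo across different regions are chained through
   the next_regno_allocno member, so the typical walk over this map is

     for (a = ira_regno_allocno_map[regno];
          a != NULL;
          a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
       ...;

   as done, for example, by propagate_allocno_info below.  */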

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;

/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}

/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root, which corresponds to the whole function) will not be
   allocated, but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
                     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          edges.release ();
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}

/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}

/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}

/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}

/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add the
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (class loop *loop)
{
  int loop_num;
  class loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}

/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns maximal value of level in the tree + 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
        max_height = height;
    }
  return max_height;
}

/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  class loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
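
/* Editorial sketch (not from the original sources): for a function with
   one natural loop the resulting tree is roughly

     root node (loop_num 0, the whole function)
       children: bb nodes outside the loop, plus the loop node
       subloops: the loop node
         children: bb nodes inside the loop

   where sibling nodes are chained through member `next' and subloops
   through member `subloop_next'.  */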

/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}

/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;

/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}

/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}

/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.c).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CROSSED_CALLS_ABIS (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}

/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
    }
}

/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
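
/* Worked example (editorial, not from the original sources): on a
   32-bit target where a DImode pseudo needs
   ira_reg_class_max_nregs[aclass][mode] == 2 registers and
   GET_MODE_SIZE (DImode) == 2 * UNITS_PER_WORD, the allocno gets two
   objects, one per word, so each word can carry its own conflict set;
   every other combination falls back to a single object.  */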

/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}

/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        OBJECT_CONFLICT_HARD_REGS (to_obj)
          |= OBJECT_CONFLICT_HARD_REGS (from_obj);
      OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj)
        |= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj);
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}

/* Update hard register conflict information for all objects associated with
   A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, const_hard_reg_set set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= set;
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= set;
    }
}

/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such case because it does not result
       in allocation.  */
    return false;

  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
          < 3 * nw * sizeof (IRA_INT_TYPE));
}
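
/* Worked example of the trade-off above (editorial, with assumed
   host-dependent constants): with 8-byte pointers, 64-bit conflict
   words (sizeof (IRA_INT_TYPE) == 8, IRA_INT_BITS == 64), and an
   object whose conflict ids span 0..255, nw == 4, so the bit vector
   costs 3 * 4 * 8 == 96 units against 2 * 8 * (num + 1) for the
   vector; the vector representation wins only while num <= 4.  Only
   the inequality matters, the constants vary by host.  */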

/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}

/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}

/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos, whichever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}

/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);

      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
        {
          ira_object_t *newvec;

          size = (3 * num / 2 + 1) * sizeof (ira_object_t);
          newvec = (ira_object_t *) ira_allocate (size);
          memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
          ira_free (vec);
          vec = newvec;
          OBJECT_CONFLICT_ARRAY (obj1) = vec;
          OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
        }
      vec[curr_num] = obj2;
      vec[curr_num + 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
        {
          /* Expand head of the bit vector.  */
          added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
          nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
            {
              memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                       vec, nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
            }
          else
            {
              size
                = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                      OBJECT_CONFLICT_ARRAY (obj1),
                      nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
              memset ((char *) vec
                      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
                      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
        }
      else if (OBJECT_MAX (obj1) < id)
        {
          nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = nw * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
            {
              /* Expand tail of the bit vector.  */
              size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
                      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
                      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MAX (obj1) = id;
        }
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
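
/* Editorial note: the bit vector above is windowed, covering only
   conflict ids in [OBJECT_MIN, OBJECT_MAX].  E.g. an object that so far
   conflicts only with ids 130..160 needs just one or two IRA_INT_BITS
   words rather than ira_objects_num bits, and the head/tail expansion
   code above grows the window lazily as outlying ids are added.  */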

/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}

/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}

/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;

/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
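
/* Editorial note: conflict_check works as a timestamped "seen" array.
   Bumping curr_conflict_check_tick invalidates all marks at once, so
   the array never needs to be re-cleared between objects; each id is
   kept at its first occurrence and dropped on repeats.  */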

/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    if (OBJECT_CONFLICT_VEC_P (obj))
      compress_conflict_vec (obj);
  ira_free (conflict_check);
}

/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}

/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  ALLOCNO_CROSSED_CALLS_ABIS (cap) = ALLOCNO_CROSSED_CALLS_ABIS (a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
    = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}

/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
                       live_range_t next)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}

/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;

  p = ira_create_live_range (object, start, finish,
                             OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}

/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  *p = *r;
  return p;
}

/* Copy allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
        first = p;
      else
        last->next = p;
      last = p;
    }
  return first;
}

/* Merge ranges R1 and R2 and return the result.  The function
   maintains the order of ranges and tries to minimize the number of
   result ranges.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
        std::swap (r1, r2);
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          live_range_t temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2;
              r2 = NULL;
            }
        }
    }
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
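
/* Editorial example: with lists kept ordered by decreasing start,
   merging {[12,14], [1,4]} with {[3,6]} coalesces the overlapping
   (in fact even merely adjacent, hence the "+ 1" test above) ranges
   [1,4] and [3,6] and yields {[12,14], [1,6]}.  */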

/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
        r1 = r1->next;
      else if (r2->start > r1->finish)
        r2 = r2->next;
      else
        return true;
    }
  return false;
}

/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  live_range_pool.remove (r);
}

/* Free list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}

/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
        ira_free (OBJECT_CONFLICT_ARRAY (obj));
      object_pool.remove (obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  allocno_pool.remove (a);
}

/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  allocno_pool.release ();
  object_pool.release ();
  live_range_pool.release ();
}

/* Pools for allocno preferences.  */
static object_allocator<ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;

/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}

/* Return pref for A and HARD_REGNO if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}

/* Create and return pref with given attributes A, HARD_REGNO, and FREQ.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = pref_pool.allocate ();
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}

/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}

/* Create (or update frequency if the pref already exists) the pref of
   allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    freq = 1;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}

/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
           ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
           pref->hard_regno, pref->freq);
}

/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}

/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}

/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}

/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}

/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}

/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pref_pool.remove (pref);
}

/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
             pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}

/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}

/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  pref_pool.release ();
}

/* Pools for copies.  */
static object_allocator<ira_allocno_copy> copy_pool ("copies");

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;

/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}

/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
                   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a1)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
          && cp->loop_tree_node == loop_tree_node)
        return cp;
    }
  return NULL;
}

/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                 bool constraint_p, rtx_insn *insn,
                 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = copy_pool.allocate ();
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}

/* Attach a copy CP to the allocnos involved in the copy.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
        cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
        cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
        cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
        cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}

/* Make a copy CP a canonical copy where the number of the
   first allocno is less than the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  std::swap (cp->first, cp->second);
  std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
  std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
}
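
/* Editorial example: a copy created as (first = a7, second = a2) is
   rewritten to (a2, a7) along with its list links, so each unordered
   allocno pair has exactly one canonical representation.  */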

/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                      bool constraint_p, rtx_insn *insn,
                      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
                        loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}

/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
           ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
           ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
           cp->insn != NULL
           ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}

DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}

/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}

/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}

/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
               ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}

DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}

/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  copy_pool.remove (cp);
}

/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  copy_pool.release ();
}

/* Pools for cost vectors.  It is defined only for allocno classes.  */
static pool_allocator *cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass] = new pool_allocator
        ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
    }
}

/* Allocate and return a cost vector VEC for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) cost_vector_pool[(int) aclass]->allocate ();
}

/* Free a cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  cost_vector_pool[(int) aclass]->remove (vec);
}

/* Finish work with hard register cost vectors.  Release allocation
   pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      delete cost_vector_pool[aclass];
    }
}

/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbors in the topsort.  */
static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
                                  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
         the flag to mark blocks we still have to visit to add them to
         our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
        {
          gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
          subloop_node->bb->flags |= BB_TO_VISIT;
        }

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
        {
          if (! (subloop_node->bb->flags & BB_TO_VISIT))
            continue;

          subloop_node->bb->flags &= ~BB_TO_VISIT;
          dfs_stack.quick_push (subloop_node);
          while (! dfs_stack.is_empty ())
            {
              edge e;
              edge_iterator ei;

              ira_loop_tree_node_t n = dfs_stack.last ();
              FOR_EACH_EDGE (e, ei, n->bb->preds)
                {
                  ira_loop_tree_node_t pred_node;
                  basic_block pred_bb = e->src;

                  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                    continue;

                  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
                  if (pred_node != n
                      && (pred_node->bb->flags & BB_TO_VISIT))
                    {
                      pred_node->bb->flags &= ~BB_TO_VISIT;
                      dfs_stack.quick_push (pred_node);
                    }
                }
              if (n == dfs_stack.last ())
                {
                  dfs_stack.pop ();
                  topsort_nodes.quick_push (n);
                }
            }
        }

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}

/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;

/* This recursive function traverses the loop tree with root LOOP_NODE,
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
                        void (*preorder_func) (ira_loop_tree_node_t),
                        void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
         is set up such that nodes in the loop body appear in a pre-order
         of their place in the CFG.  */
      for (subloop_node = loop_node->children;
           subloop_node != NULL;
           subloop_node = subloop_node->next)
        if (subloop_node->bb != NULL)
          loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
        FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
          (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
        {
          vec<ira_loop_tree_node_t> loop_rev_postorder =
            ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
          FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
            (*postorder_func) (subloop_node);
          loop_rev_postorder.release ();
        }
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
                              preorder_func, postorder_func);
    }

  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}

/* The basic block currently being processed.  */
static basic_block curr_bb;

/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  OUTER corresponds to the parent expression of X.  */
static void
create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
        {
          ira_allocno_t a;

          if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
            {
              a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
              if (outer != NULL && GET_CODE (outer) == SUBREG)
                {
                  machine_mode wmode = GET_MODE (outer);
                  if (partial_subreg_p (ALLOCNO_WMODE (a), wmode))
                    ALLOCNO_WMODE (a) = wmode;
                }
            }

          ALLOCNO_NREFS (a)++;
          ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
          if (output_p)
            bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
        }
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), NULL, true);
      create_insn_allocnos (SET_SRC (x), NULL, false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      return;
    }
  else if (code == MEM)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
           code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        create_insn_allocnos (XEXP (x, i), x, output_p);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
    }
}
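
/* Editorial example: for an insn pattern
   (set (reg 130) (plus (reg 131) (mem (reg 132)))) the walk above
   creates (or reuses) allocnos for pseudos 130, 131 and 132, counts a
   reference for each, and marks 130 in modified_regnos because it is
   written; 132 counts as a use even though it appears inside an
   address.  */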

/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx_insn *insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), NULL, false);
  /* It might be an allocno living through from one subloop to
     another subloop.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}

/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
                             FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
        if (ira_curr_regno_allocno_map[i] == NULL)
          {
            /* The order of creations is important for right
               ira_regno_allocno_map.  */
            if ((parent = ira_curr_loop_tree_node->parent) != NULL
                && parent->regno_allocno_map[i] == NULL)
              ira_create_allocno (i, false, parent);
            ira_create_allocno (i, false, ira_curr_loop_tree_node);
          }
        bitmap_set_bit (border_allocnos,
                        ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}

/* Create allocnos corresponding to pseudo-registers living in the loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch)
          create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
        create_loop_allocnos (e);
      edges.release ();
    }
}

/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
                   loop_tree_node->modified_regnos);
}

/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
          && (parent_a = parent->regno_allocno_map[i]) != NULL
          /* There are no caps yet at this point.  So use
             border_allocnos to find allocnos for the propagation.  */
          && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
                           ALLOCNO_NUM (a)))
        {
          if (! ALLOCNO_BAD_SPILL_P (a))
            ALLOCNO_BAD_SPILL_P (parent_a) = false;
          ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
          ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
          ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
          merge_hard_reg_conflicts (a, parent_a, true);
          ALLOCNO_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CALLS_CROSSED_NUM (a);
          ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
          ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
            |= ALLOCNO_CROSSED_CALLS_ABIS (a);
          ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
            |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
          ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
            += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
          aclass = ALLOCNO_CLASS (a);
          ira_assert (aclass == ALLOCNO_CLASS (parent_a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
             ALLOCNO_HARD_REG_COSTS (a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
             aclass,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
          ALLOCNO_CLASS_COST (parent_a)
            += ALLOCNO_CLASS_COST (a);
          ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
        }
}

/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
                          create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
                            propagate_modified_regnos);
}

/* The page contains functions to remove some regions from separate
   register allocation.  We remove regions whose separate allocation
   will hardly improve the result.  As a result we speed up regional
   register allocation.  */

/* The function changes the object in the range list given by R to OBJ.  */
static void
change_object_in_range_list (live_range_t r, ira_object_t obj)
{
  for (; r != NULL; r = r->next)
    r->object = obj;
}

/* Move all live ranges associated with allocno FROM to allocno TO.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file,
                   "      Moving ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
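
/* A note on the two variants above: move_allocno_live_ranges hands
   FROM's range lists over to TO and leaves FROM without ranges, while
   copy_allocno_live_ranges first duplicates each list with
   ira_copy_live_range_list, so FROM keeps its own copy.  In both
   cases ira_merge_live_ranges coalesces overlapping ranges in TO.  */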
/* Return TRUE if NODE represents a loop with low register
   pressure.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
	  && ira_class_hard_regs_num[pclass] > 1)
	return false;
    }
  return true;
}
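
/* Worked example with invented numbers: a node with
   reg_pressure[GENERAL_REGS] == 10 while ira_class_hard_regs_num
   [GENERAL_REGS] == 8 is not a low pressure node.  The "> 1" test
   keeps pressure classes with a single hard register from
   disqualifying every node.  */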
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers
   because reg-stack.c cannot deal with such edges.  */
static bool
loop_with_complex_edge_p (class loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->count.to_frequency (cfun)
	      - l2->loop->header->count.to_frequency (cfun)) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop_num - l2->loop_num;
}
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architectures it could
   help by choosing a better hard register in the loop, but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than param_ira_max_loops_num of them.  Loops with
   EH enter or exit edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do
   this for pseudos with caller saved hard registers in some cases but
   it is impossible to say here or during the top-down allocation pass
   what hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop));
      }
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; i < n - param_ira_max_loops_num; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->count.to_frequency (cfun),
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
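
/* Illustrative sketch assuming the default param_ira_max_loops_num of
   100: if 103 loops keep their regno_allocno_map, the qsort above
   puts already marked, then less frequent, then outer loops first,
   and the final loop marks exactly n - 100 = 3 of them, so at most
   100 loops remain as separate regions.  */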
/* Mark all loops but the root for removal.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.  */
static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      removed_loop_vec.safe_push (node);
      return;
    }
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
/* Return TRUE if NODE is inside PARENT.  */
static bool
loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
{
  for (node = node->parent; node != NULL; node = node->parent)
    if (node == parent)
      return true;
  return false;
}
/* Sort allocnos according to their order in the regno allocno
   list.  */
static int
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);

  if (loop_is_inside_p (n1, n2))
    return -1;
  else if (loop_is_inside_p (n2, n1))
    return 1;
  /* If allocnos are equally good, sort by allocno numbers, so that
     the results of qsort leave nothing to chance.  We put allocnos
     with higher numbers first in the list because it is the original
     order for allocnos from loops on the same levels.  */
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  */
static ira_allocno_t *regno_allocnos;
/* Restore allocno order for REGNO in the regno allocno list.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CROSSED_CALLS_ABIS (a) |= ALLOCNO_CROSSED_CALLS_ABIS (from_a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
    |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a);

  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	{
	  /* We need to restore the order in regno allocno list.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    OBJECT_CONFLICT_HARD_REGS (obj)
	      = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
/* Remove loops from consideration.  We remove all loops except for
   the root if ALL_P, or loops for which a separate allocation will
   not improve the result.  We have to do this after allocno creation
   and their costs and allocno class evaluation because only after
   that the register pressure can be known and is calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}
/* At this point a true value of the allocno attribute bad_spill_p
   means that there is an insn where the allocno occurs and where the
   allocno cannot be used as memory.  The function updates the
   attribute; now it can be true only for allocnos which cannot be
   used as memory in an insn and in whose live ranges there are other
   allocnos' deaths.  Spilling allocnos with a true value will not
   improve the code because it will not make other allocnos colorable,
   and additional reloads for the corresponding pseudo will be
   generated in the reload pass for each insn in which it occurs.

   This is a trick mentioned in the classic article of Chaitin et al.
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  if (r != NULL)
	    break;
	}
      if (r == NULL)
	ALLOCNO_BAD_SPILL_P (a) = false;
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
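
/* Illustrative example with invented points: dead_points[C] holds the
   finish point of every live range of a class C allocno.  If allocno
   A of class C has the single range [10, 20] and some other class C
   allocno dies at point 15, the scan above finds bit 15 set and A
   keeps its bad_spill_p value; if no finish falls strictly between 10
   and 20, bad_spill_p is cleared and A becomes an ordinary spill
   candidate.  */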
/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      {
		/* The object is not used and hence does not live.  */
		ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
		OBJECT_MAX (obj) = 0;
		OBJECT_MIN (obj) = 1;
		continue;
	      }
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		for (cap = ALLOCNO_CAP (a);
		     cap != NULL;
		     cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
	  && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
/* Sort allocnos according to their live ranges.  Allocnos with
   smaller allocno classes are put first unless we use priority
   coloring.  Allocnos with the same class are ordered according to
   their start (min).  Allocnos with the same start are ordered
   according to their finish (max).  */
static int
object_range_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_object_t obj1 = *(const ira_object_t *) v1p;
  ira_object_t obj2 = *(const ira_object_t *) v2p;
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);

  if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
    return diff;
  if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
/* Sort ira_object_id_map and set up conflict ids of allocnos.  */
static void
sort_conflict_id_map (void)
{
  int i, num;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	ira_object_id_map[num++] = obj;
    }
  qsort (ira_object_id_map, num, sizeof (ira_object_t),
	 object_range_compare_func);
  for (i = 0; i < num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      gcc_assert (obj != NULL);
      OBJECT_CONFLICT_ID (obj) = i;
    }
  for (i = num; i < ira_objects_num; i++)
    ira_object_id_map[i] = NULL;
}
/* Set up minimal and maximal conflict ids of allocnos with which
   the given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocnos with smaller ids will
	     be also skipped because of the secondary sort on range
	     finishes (see function object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	  if (min == i)
	    /* We could increase min further in this case but it is
	       good enough.  */
	    min = i - 1;
	}
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go beyond the range finish of OBJ to
	 recognize intersections quickly, because if the range finish
	 of a subsequently processed allocno (which has a smaller
	 conflict id) lies beyond OBJ's range finish, the two
	 definitely intersect (allocnos with bigger conflict ids have
	 range starts no smaller than allocnos with smaller ids).  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra
     conflicts in subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
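
/* Worked example (ids invented): after sort_conflict_id_map, objects
   are numbered by increasing range start.  If object 7 starts after
   the ranges of objects 0..4 have finished, first_not_finished stops
   at 5 and OBJECT_MIN (obj7) becomes 5: objects with conflict ids
   below 5 were already dead at obj7's start and need not be checked
   for conflicts.  OBJECT_MAX is narrowed symmetrically by the second,
   backward scan.  */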
/* Create caps representing allocnos living only inside loops on
   higher loop levels.  */
static void
create_caps (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	create_cap_allocno (a);
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
	    create_cap_allocno (a);
	}
    }
}


/* The page contains code transforming a more-than-one-region internal
   representation (IR) into a one-region IR, which is necessary for
   reload.  This transformation is called IR flattening.  We might just
   rebuild the IR for one region, but we don't do it because it takes a
   lot of time.  */

/* Map: regno -> allocnos which will finally represent the regno for
   IR with one region.  */
static ira_allocno_t *regno_top_level_allocno_map;
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
ira_allocno_t
ira_parent_allocno (ira_allocno_t a)
{
  ira_loop_tree_node_t parent;

  if (ALLOCNO_CAP (a) != NULL)
    return NULL;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  if (parent == NULL)
    return NULL;

  return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
}
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  If ALLOCNO_CAP is set for A, return that.  */
ira_allocno_t
ira_parent_or_cap_allocno (ira_allocno_t a)
{
  if (ALLOCNO_CAP (a) != NULL)
    return ALLOCNO_CAP (a);

  return ira_parent_allocno (a);
}
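
/* A hedged usage sketch of the two helpers above: starting from an
   allocno A deep in the loop tree,

     for (p = ira_parent_or_cap_allocno (A);
	  p != NULL;
	  p = ira_parent_or_cap_allocno (p))
       ...

   visits the representative of A's regno (or its cap) on each level
   up to the root.  */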
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
	|= ALLOCNO_CROSSED_CALLS_ABIS (a);
      ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
	|= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they never will
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
	  = OBJECT_CONFLICT_HARD_REGS (obj);
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      /* Assume that ALLOCNO_CROSSED_CALLS_ABIS and
		 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS stay the same.
		 We'd need to rebuild the IR to do better.  */
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* The copy was generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from a level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
/* Identify allocnos which prefer a register class with a single hard register.
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
   less likely to use the preferred singleton register.  */
static void
update_conflict_hard_reg_costs (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int i, index, min;

  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_class_t aclass = ALLOCNO_CLASS (a);
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
      int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
      if (singleton < 0)
	continue;
      index = ira_class_hard_reg_index[(int) aclass][singleton];
      if (index < 0)
	continue;
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
	continue;
      min = INT_MAX;
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
      if (min == INT_MAX)
	continue;
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				  aclass, 0);
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
	-= min - ALLOCNO_CLASS_COST (a);
    }
}
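
/* Numeric sketch with invented costs: suppose A's preferred class
   contains the single hard register R, ALLOCNO_CLASS_COST (A) is 1000
   and the cheapest ALLOCNO_HARD_REG_COSTS entry above it is 2000.
   Then min is 2000 and the entry of R in
   ALLOCNO_CONFLICT_HARD_REG_COSTS (A) is decreased by 1000, steering
   allocnos conflicting with A away from R as the comment above
   describes.  */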
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate a loop
   structure (besides the nodes representing the whole function and
   the basic blocks) for regional allocation.  A true return means
   that we really need to flatten the IR before reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but the root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocnos crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, ira_need_caller_save_regs (a));
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}